Compare commits


No commits in common. "master" and "v2.52.0-rc1" have entirely different histories.

310 changed files with 13588 additions and 22725 deletions

.gitattributes

@ -1,13 +1,13 @@
* whitespace=trail,space
*.[ch] whitespace=indent,trail,space,incomplete diff=cpp
*.sh whitespace=indent,trail,space,incomplete text eol=lf
* whitespace=!indent,trail,space
*.[ch] whitespace=indent,trail,space diff=cpp
*.sh whitespace=indent,trail,space text eol=lf
*.perl text eol=lf diff=perl
*.pl text eol=lf diff=perl
*.pm text eol=lf diff=perl
*.py text eol=lf diff=python
*.bat text eol=crlf
CODE_OF_CONDUCT.md -whitespace
/Documentation/**/*.adoc text eol=lf whitespace=trail,space,incomplete
/Documentation/**/*.adoc text eol=lf
/command-list.txt text eol=lf
/GIT-VERSION-GEN text eol=lf
/mergetools/* text eol=lf
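
For readers comparing the two rule sets, `git check-attr` is the quickest way to see what a given path ends up with; a minimal sketch, using a hypothetical C source file and showing the v2.52.0-rc1 side of the rules:

----
# Ask Git which whitespace and diff rules apply to refs.c (the path is illustrative)
$ git check-attr whitespace diff -- refs.c
refs.c: whitespace: indent,trail,space
refs.c: diff: cpp
----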


@ -63,7 +63,7 @@ jobs:
origin \
${{ github.ref }} \
$args
- uses: actions/setup-go@v6
- uses: actions/setup-go@v5
with:
go-version: '>=1.16'
cache: false


@ -123,7 +123,7 @@ jobs:
- name: zip up tracked files
run: git archive -o artifacts/tracked.tar.gz HEAD
- name: upload tracked files and build artifacts
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: windows-artifacts
path: artifacts
@ -140,7 +140,7 @@ jobs:
cancel-in-progress: ${{ needs.ci-config.outputs.skip_concurrent == 'yes' }}
steps:
- name: download tracked files and build artifacts
uses: actions/download-artifact@v6
uses: actions/download-artifact@v5
with:
name: windows-artifacts
path: ${{github.workspace}}
@ -157,7 +157,7 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: failed-tests-windows-${{ matrix.nr }}
path: ${{env.FAILED_TEST_ARTIFACTS}}
@ -208,7 +208,7 @@ jobs:
- name: zip up tracked files
run: git archive -o artifacts/tracked.tar.gz HEAD
- name: upload tracked files and build artifacts
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: vs-artifacts
path: artifacts
@ -226,7 +226,7 @@ jobs:
steps:
- uses: git-for-windows/setup-git-for-windows-sdk@v1
- name: download tracked files and build artifacts
uses: actions/download-artifact@v6
uses: actions/download-artifact@v5
with:
name: vs-artifacts
path: ${{github.workspace}}
@ -244,7 +244,7 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: failed-tests-windows-vs-${{ matrix.nr }}
path: ${{env.FAILED_TEST_ARTIFACTS}}
@ -270,7 +270,7 @@ jobs:
shell: pwsh
run: meson compile -C build
- name: Upload build artifacts
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: windows-meson-artifacts
path: build
@ -292,23 +292,13 @@ jobs:
shell: pwsh
run: pip install meson ninja
- name: Download build artifacts
uses: actions/download-artifact@v6
uses: actions/download-artifact@v5
with:
name: windows-meson-artifacts
path: build
- name: Test
shell: pwsh
run: ci/run-test-slice-meson.sh build ${{matrix.nr}} 10
- name: print test failures
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
shell: bash
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v4
with:
name: failed-tests-windows-meson-${{ matrix.nr }}
path: ${{env.FAILED_TEST_ARTIFACTS}}
run: meson test -C build --no-rebuild --print-errorlogs --slice "$(1+${{ matrix.nr }})/10"
regular:
name: ${{matrix.vector.jobname}} (${{matrix.vector.pool}})
@ -349,7 +339,7 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: failed-tests-${{matrix.vector.jobname}}
path: ${{env.FAILED_TEST_ARTIFACTS}}
@ -443,13 +433,13 @@ jobs:
- run: ci/install-dependencies.sh
- run: useradd builder --create-home
- run: chown -R builder .
- run: chmod a+w $GITHUB_ENV && sudo --preserve-env --set-home --user=builder ci/run-build-and-tests.sh
- run: sudo --preserve-env --set-home --user=builder ci/run-build-and-tests.sh
- name: print test failures
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
run: sudo --preserve-env --set-home --user=builder ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: failed-tests-${{matrix.vector.jobname}}
path: ${{env.FAILED_TEST_ARTIFACTS}}


@ -53,7 +53,6 @@ MAN7_TXT += gitcli.adoc
MAN7_TXT += gitcore-tutorial.adoc
MAN7_TXT += gitcredentials.adoc
MAN7_TXT += gitcvs-migration.adoc
MAN7_TXT += gitdatamodel.adoc
MAN7_TXT += gitdiffcore.adoc
MAN7_TXT += giteveryday.adoc
MAN7_TXT += gitfaq.adoc
@ -143,7 +142,6 @@ TECH_DOCS += technical/shallow
TECH_DOCS += technical/sparse-checkout
TECH_DOCS += technical/sparse-index
TECH_DOCS += technical/trivial-merge
TECH_DOCS += technical/unambiguous-types
TECH_DOCS += technical/unit-tests
SP_ARTICLES += $(TECH_DOCS)
SP_ARTICLES += technical/api-index


@ -17,10 +17,10 @@ UI, Workflows & Features
* A new command "git last-modified" has been added to show the closest
ancestor commit that touched each path.
* The "git refs exists" command that works like "git show-ref --exists"
has been added.
* "git refs exists" that works like "git show-ref --exists" has been
added.
* "git repo info" learns the short-hand option "-z" that is the same as
* "repo info" learns a short-hand option "-z" that is the same as
"--format=nul", and learns to report the objects format used in the
repository.
@ -53,7 +53,7 @@ UI, Workflows & Features
* Configuration variables that take a pathname as a value
(e.g. blame.ignorerevsfile) can be marked as optional by prefixing
":(optional)" before its value.
":(optoinal)" before its value.
* Show 'P'ipe command in "git add -p".
@ -182,14 +182,6 @@ Performance, Internal Implementation, Development Support etc.
* The "debug" ref-backend was missing a method implementation, which
has been corrected.
* Build procedure for Wincred credential helper has been updated.
* The build procedure based on meson learned to allow builders to
specify the directory to install HTML documents.
* Building "git contacts" script (in contrib/) left the resulting
file unexecutable, which has been corrected.
Fixes since v2.51
-----------------
@ -433,10 +425,7 @@ including security updates, are included in this release.
* The version of macos image used in GitHub CI has been updated to
macos-14, as the macos-13 that we have been using got deprecated.
Perforce binary used there has been changed to arm64 version to
match.
(merge 73b9cdb7c4 jc/ci-use-macos-14 later to maint).
(merge ffff0bb0da jc/ci-use-arm64-p4-on-macos later to maint).
* Other code cleanup, docfix, build fix, etc.
(merge 529a60a885 ua/t1517-short-help-tests later to maint).


@ -1,265 +0,0 @@
Git v2.53 Release Notes
=======================
UI, Workflows & Features
------------------------
* "git maintenance" command learned "is-needed" subcommand to tell if
it is necessary to perform various maintenance tasks.
* "git replay" (experimental) learned to perform ref updates itself
in a transaction by default, instead of emitting where each ref
should point at and leaving the actual update to another command.
* "git blame" learns "--diff-algorithm=<algo>" option.
* "git repo info" learned "--all" option.
* Both "git apply" and "git diff" learn a new whitespace error class,
"incomplete-line".
* Add a new manual that describes the data model.
* "git fast-import" learns "--strip-if-invalid" option to drop
invalid cryptographic signature from objects.
* The use of "revision" (a connected set of commits) has been
clarified in the "git replay" documentation.
* A help message from "git branch" now mentions "git help" instead of
"man" when suggesting to read some documentation.
* "git repo struct" learned to take "-z" as a synonym to "--format=nul".
* More object database related information is shown in "git repo
structure" output.
Performance, Internal Implementation, Development Support etc.
--------------------------------------------------------------
* The list of packfiles used in a running Git process is moved from
the packed_git structure into the packfile store.
* Some ref backend storage can hold not just the object name of an
annotated tag, but the object name of the object the tag points at.
The code to handle this information has been streamlined.
* As "git diff --quiet" only cares about the existence of any
changes, disable rename/copy detection to skip more expensive
processing whose result will be discarded anyway.
* A part of code paths that deals with loose objects has been cleaned
up.
* "make strip" has been taught to strip "scalar" as well as "git".
* Dockerised jobs at the GitHub Actions CI have been taught to show
more details of failed tests.
* Code refactoring around object database sources.
* Halve the memory consumed by artificial filepairs created during
"git diff --find-copioes-harder", also making the operation run
faster.
* The "git_istream" abstraction has been revamped to make it easier
to interface with pluggable object database design.
* Rewrite the only use of "mktemp()" that is subject to TOCTOU race
and stop using the insecure "mktemp()" function.
(merge 10bba537c4 rs/ban-mktemp later to maint).
* In-code comment update to clarify that single-letter options are
outside of the scope of command line completion script.
(merge dc8a00fafe jc/completion-no-single-letter-options later to maint).
* MEMZERO_ARRAY() helper is introduced to avoid clearing only the
first N bytes of an N-element array whose elements are larger than
a byte.
* "git diff-files -R --find-copies-harder" has been taught to use
the potential copy sources from the index correctly.
* Require C99 style flexible array member support from all platforms.
* The code path that enumerates promisor objects has been optimized
to skip pointlessly parsing blob objects.
* Prepare test suite for Git for Windows that supports symbolic
links.
* Use hook API to replace ad-hoc invocation of hook scripts with the
run_command() API.
Fixes since v2.52
-----------------
* Ever since we added whitespace rules for this project, we misspelt
an entry, which has been corrected.
(merge 358e94dc70 jc/gitattributes-whitespace-no-indent-fix later to maint).
* The code to expand attribute macros has been rewritten to avoid
recursion, so as not to run out of stack space in an uncontrolled
way.
(merge 42ed046866 jk/attr-macroexpand-wo-recursion later to maint).
* Adding a repository that uses a different hash function is a no-no,
but "git submodule add" did nt prevent it, which has been corrected.
(merge 6fe288bfbc bc/submodule-force-same-hash later to maint).
* An earlier check added to osx keychain credential helper to avoid
storing the credential it itself supplied was overeager and rejected
credential material supplied by other helper backends that it would
have wanted to store, which has been corrected.
(merge 4580bcd235 kn/osxkeychain-idempotent-store-fix later to maint).
* The "git repo structure" subcommand tried to align its output but
mixed up byte count and display column width, which has been
corrected.
(merge 7a03a10a3a jx/repo-struct-utf8width-fix later to maint).
* Yet another corner case fix around renames in the "ort" merge
strategy.
(merge a562d90a35 en/ort-rename-another-fix later to maint).
* Test leakfix.
(merge 14b561e768 jk/test-mktemp-leakfix later to maint).
* Update a version of action used at the GitHub Actions CI.
(merge cd99203f86 js/ci-github-setup-go-update later to maint).
* The "return errno = EFOO, -1" construct, which is heavily used in
compat/mingw.c and triggers warnings under "-Wcomma", has been
rewritten to avoid the warnings.
(merge af3919816f js/mingw-assign-comma-fix later to maint).
* Makefile based build has recently been updated to build a
libgit.a that also has reftable and xdiff objects; CMake based
build procedure has been updated to match.
(merge b0d5c88cca js/cmake-libgit-fix later to maint).
* Under-allocation fix.
(merge d22a488482 js/wincred-get-credential-alloc-fix later to maint).
* "git worktree list" attempts to show paths to worktrees while
aligning them, but miscounted display columns for the paths when
non-ASCII characters were involved, which has been corrected.
(merge 08dfa59835 pw/worktree-list-display-width-fix later to maint).
* "Windows+meson" job at the GitHub Actions CI was hard to debug, as
it did not show and save failed test artifacts, which has been
corrected.
(merge 17bd1108ea jk/ci-windows-meson-test-fix later to maint).
* Emulation code clean-up.
(merge 2367c6bcd6 gf/win32-pthread-cond-wait-err later to maint).
* Various issues detected by Asan have been corrected.
(merge a031b6181a jk/asan-bonanza later to maint).
* "git config get --path" segfaulted on an ":(optional)path" that
does not exist, which has been corrected.
(merge 0bd16856ff jc/optional-path later to maint).
* The "--committer-date-is-author-date" option of "git am/rebase" is
a misguided one. The documentation is updated to discourage its
use.
(merge fbf3d0669f kh/doc-committer-date-is-author-date later to maint).
* The option help text given by "git config unset -h" described
the "--all" option to "replace", not "unset", multiple variables,
which has been corrected.
(merge 18bf67b753 rs/config-unset-opthelp-fix later to maint).
* The error message given by "git config set", when the variable
being updated has more than one value defined, used old style "git
config" syntax with an incorrect option in its hint, both of which
have been corrected.
(merge df963f0df4 rs/config-set-multi-error-message-fix later to maint).
* "git replay" forgot to omit the "gpgsig-sha256" extended header
from the resulting commit the same way it omits "gpgsig", which has
been corrected.
(merge 9f3a115087 pw/replay-exclude-gpgsig-fix later to maint).
* A few tests have been updated to work under the shell compatible
mode of zsh.
(merge a92f243a94 bc/zsh-testsuite later to maint).
* The way patience diff finds LCS has been optimized.
(merge c7e3b8085b yc/xdiff-patience-optim later to maint).
* Recent optimization to "last-modified" command introduced use of
uninitialized block of memory, which has been corrected.
(merge fe4e60759b tc/last-modified-active-paths-optimization later to maint).
* "git last-modified" used to mishandle "--" to mark the beginning of
pathspec, which has been corrected.
(merge 05491b90ce js/last-modified-with-sparse-checkouts later to maint).
* Emulation code clean-up.
(merge 42aa7603aa gf/win32-pthread-cond-init later to maint).
* "git submodule add" to add a submodule under <name> segfaulted,
when a submodule.<name>.something is already in .gitmodules file
without defining where its submodule.<name>.path is, which has been
corrected.
(merge dd8e8c786e jc/submodule-add later to maint).
* "git fetch" that involves fetching tags, when a tag being fetched
needs to overwrite existing one, failed to fetch other tags, which
has been corrected.
(merge b7b17ec8a6 kn/fix-fetch-backfill-tag-with-batched-ref-updates later to maint).
* Document "rev-list --filter-provided-objects" better.
(merge 6d8dc99478 jt/doc-rev-list-filter-provided-objects later to maint).
* Even when there are no changes in the packfile and no need to
recompute bitmaps, "git repack" recomputed and updated the MIDX
file, which has been corrected.
(merge 6ce9d558ce ps/repack-avoid-noop-midx-rewrite later to maint).
* Update HTTP tests to adjust for changes in curl 8.18.0.
(merge 17f4b01da7 jk/test-curl-updates later to maint).
* Workaround the "iconv" shipped as part of macOS, which is broken
handling stateful ISO/IEC 2022 encoded strings.
(merge cee341e9dd rs/macos-iconv-workaround later to maint).
* Running "git diff" with "--name-only" and other options that allows
us not to look at the blob contents, while objects that are lazily
fetched from a promisor remote, caused use-after-free, which has
been corrected.
* The ort merge machinery hit an assertion failure in a history with
criss-cross merges that renamed a directory and a non-directory, which
has been corrected.
(merge 979ee83e8a en/ort-recursive-d-f-conflict-fix later to maint).
* Other code cleanup, docfix, build fix, etc.
(merge 46207a54cc qj/doc-http-bad-want-response later to maint).
(merge df90eccd93 kh/doc-commit-extra-references later to maint).
(merge f18aa68861 rs/xmkstemp-simplify later to maint).
(merge fddba8f737 ja/doc-synopsis-style later to maint).
(merge 22ce0cb639 en/xdiff-cleanup-2 later to maint).
(merge 8ef7355a8f je/doc-pull later to maint).
(merge 48176f953f jc/capability-leak later to maint).
(merge 8cbbdc92f7 kh/doc-pre-commit-fix later to maint).
(merge d4bc39a4d9 mh/doc-config-gui-gcwarning later to maint).
(merge 41d425008a kh/doc-send-email-paragraph-fix later to maint).
(merge d4b732899e jc/macports-darwinports later to maint).
(merge bab391761d kj/pull-options-decl-cleanup later to maint).
(merge 007b8994d4 rs/t4014-git-version-string-fix later to maint).
(merge 4ce170c522 ds/doc-scalar-config later to maint).
(merge a0c813951a jc/doc-commit-signoff-config later to maint).
(merge 8ee262985a ja/doc-misc-fixes later to maint).
(merge 1722c2244b mh/doc-core-attributesfile later to maint).
(merge c469ca26c5 dk/ci-rust-fix later to maint).
(merge 12f0be0857 gf/clear-path-cache-cleanup later to maint).
(merge 949df6ed6b js/test-func-comment-fix later to maint).
(merge 93f894c001 bc/checkout-error-message-fix later to maint).
(merge abf05d856f rs/show-branch-prio-queue later to maint).
(merge 06188ea5f3 rs/parse-config-expiry-simplify later to maint).
(merge 861dbb1586 dd/t5403-modernise later to maint).


@ -492,9 +492,10 @@ core.askPass::
command-line argument and write the password on its STDOUT.
core.attributesFile::
Specifies the pathname to the file that contains attributes (see
linkgit:gitattributes[5]), in addition to `.gitattributes` (per-directory)
and `.git/info/attributes`. Its default value is
In addition to `.gitattributes` (per-directory) and
`.git/info/attributes`, Git looks into this file for attributes
(see linkgit:gitattributes[5]). Path expansions are made the same
way as for `core.excludesFile`. Its default value is
`$XDG_CONFIG_HOME/git/attributes`. If `$XDG_CONFIG_HOME` is either not
set or empty, `$HOME/.config/git/attributes` is used instead.
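
As a small illustration of the paragraph above (the path is only an example), the variable simply names one more attributes file, with the same path expansion as `core.excludesFile`:

----
# Use a personal attributes file on top of .gitattributes and .git/info/attributes
$ git config --global core.attributesFile '~/config/git-attributes'
$ git check-attr -a -- build.sh     # attributes from that file now apply as well
----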
@ -628,8 +629,6 @@ core.whitespace::
part of the line terminator, i.e. with it, `trailing-space`
does not trigger if the character before such a carriage-return
is not a whitespace (not enabled by default).
* `incomplete-line` treats the last line of a file that is missing the
newline at the end as an error (not enabled by default).
* `tabwidth=<n>` tells how many character positions a tab occupies; this
is relevant for `indent-with-non-tab` and when Git fixes `tab-in-indent`
errors. The default tab width is 8. Allowed values are 1 to 63.
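
A short sketch of how these classes are combined in practice; note that `incomplete-line` exists only on the master side of this comparison and would not be recognized by v2.52.0-rc1:

----
# Enable a custom mix of whitespace checks, honored by "git diff --check" and "git apply"
$ git config core.whitespace trailing-space,space-before-tab,tabwidth=4
$ git diff --check
----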


@ -1,32 +1,32 @@
`fetch.recurseSubmodules`::
fetch.recurseSubmodules::
This option controls whether `git fetch` (and the underlying fetch
in `git pull`) will recursively fetch into populated submodules.
This option can be set either to a boolean value or to `on-demand`.
This option can be set either to a boolean value or to 'on-demand'.
Setting it to a boolean changes the behavior of fetch and pull to
recurse unconditionally into submodules when set to true or to not
recurse at all when set to false. When set to `on-demand`, fetch and
recurse at all when set to false. When set to 'on-demand', fetch and
pull will only recurse into a populated submodule when its
superproject retrieves a commit that updates the submodule's
reference.
Defaults to `on-demand`, or to the value of `submodule.recurse` if set.
Defaults to 'on-demand', or to the value of 'submodule.recurse' if set.
`fetch.fsckObjects`::
fetch.fsckObjects::
If it is set to true, git-fetch-pack will check all fetched
objects. See `transfer.fsckObjects` for what's
checked. Defaults to `false`. If not set, the value of
checked. Defaults to false. If not set, the value of
`transfer.fsckObjects` is used instead.
`fetch.fsck.<msg-id>`::
fetch.fsck.<msg-id>::
Acts like `fsck.<msg-id>`, but is used by
linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See
the `fsck.<msg-id>` documentation for details.
`fetch.fsck.skipList`::
fetch.fsck.skipList::
Acts like `fsck.skipList`, but is used by
linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See
the `fsck.skipList` documentation for details.
`fetch.unpackLimit`::
fetch.unpackLimit::
If the number of objects fetched over the Git native
transfer is below this
limit, then the objects will be unpacked into loose object
@ -37,12 +37,12 @@
especially on slow filesystems. If not set, the value of
`transfer.unpackLimit` is used instead.
`fetch.prune`::
fetch.prune::
If true, fetch will automatically behave as if the `--prune`
option was given on the command line. See also `remote.<name>.prune`
and the PRUNING section of linkgit:git-fetch[1].
`fetch.pruneTags`::
fetch.pruneTags::
If true, fetch will automatically behave as if the
`refs/tags/*:refs/tags/*` refspec was provided when pruning,
if not set already. This allows for setting both this option
@ -50,41 +50,41 @@
refs. See also `remote.<name>.pruneTags` and the PRUNING
section of linkgit:git-fetch[1].
`fetch.all`::
fetch.all::
If true, fetch will attempt to update all available remotes.
This behavior can be overridden by passing `--no-all` or by
explicitly specifying one or more remote(s) to fetch from.
Defaults to `false`.
Defaults to false.
`fetch.output`::
fetch.output::
Control how ref update status is printed. Valid values are
`full` and `compact`. Default value is `full`. See the
OUTPUT section in linkgit:git-fetch[1] for details.
`fetch.negotiationAlgorithm`::
fetch.negotiationAlgorithm::
Control how information about the commits in the local repository
is sent when negotiating the contents of the packfile to be sent by
the server. Set to `consecutive` to use an algorithm that walks
over consecutive commits checking each one. Set to `skipping` to
the server. Set to "consecutive" to use an algorithm that walks
over consecutive commits checking each one. Set to "skipping" to
use an algorithm that skips commits in an effort to converge
faster, but may result in a larger-than-necessary packfile; or set
to `noop` to not send any information at all, which will almost
to "noop" to not send any information at all, which will almost
certainly result in a larger-than-necessary packfile, but will skip
the negotiation step. Set to `default` to override settings made
the negotiation step. Set to "default" to override settings made
previously and use the default behaviour. The default is normally
`consecutive`, but if `feature.experimental` is `true`, then the
default is `skipping`. Unknown values will cause `git fetch` to
"consecutive", but if `feature.experimental` is true, then the
default is "skipping". Unknown values will cause 'git fetch' to
error out.
+
See also the `--negotiate-only` and `--negotiation-tip` options to
linkgit:git-fetch[1].
`fetch.showForcedUpdates`::
Set to `false` to enable `--no-show-forced-updates` in
fetch.showForcedUpdates::
Set to false to enable `--no-show-forced-updates` in
linkgit:git-fetch[1] and linkgit:git-pull[1] commands.
Defaults to `true`.
Defaults to true.
`fetch.parallel`::
fetch.parallel::
Specifies the maximal number of fetch operations to be run in parallel
at a time (submodules, or remotes when the `--multiple` option of
linkgit:git-fetch[1] is in effect).
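
Tying a few of the variables above together in a hedged sketch (the remote name `origin` is just the usual default, not anything these docs require):

----
# Prune stale remote-tracking branches and use the compact ref-update output
$ git config fetch.prune true
$ git config fetch.output compact
# Try the "skipping" negotiation algorithm for a single fetch
$ git -c fetch.negotiationAlgorithm=skipping fetch origin
----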
@ -94,16 +94,16 @@ A value of 0 will give some reasonable default. If unset, it defaults to 1.
For submodules, this setting can be overridden using the `submodule.fetchJobs`
config setting.
`fetch.writeCommitGraph`::
fetch.writeCommitGraph::
Set to true to write a commit-graph after every `git fetch` command
that downloads a pack-file from a remote. Using the `--split` option,
most executions will create a very small commit-graph file on top of
the existing commit-graph file(s). Occasionally, these files will
merge and the write may take longer. Having an updated commit-graph
file helps performance of many Git commands, including `git merge-base`,
`git push -f`, and `git log --graph`. Defaults to `false`.
`git push -f`, and `git log --graph`. Defaults to false.
`fetch.bundleURI`::
fetch.bundleURI::
This value stores a URI for downloading Git object data from a bundle
URI before performing an incremental fetch from the origin Git server.
This is similar to how the `--bundle-uri` option behaves in
@ -115,9 +115,9 @@ If you modify this value and your repository has a `fetch.bundleCreationToken`
value, then remove that `fetch.bundleCreationToken` value before fetching from
the new bundle URI.
`fetch.bundleCreationToken`::
fetch.bundleCreationToken::
When using `fetch.bundleURI` to fetch incrementally from a bundle
list that uses the "`creationToken`" heuristic, this config value
list that uses the "creationToken" heuristic, this config value
stores the maximum `creationToken` value of the downloaded bundles.
This value is used to prevent downloading bundles in the future
if the advertised `creationToken` is not strictly larger than this


@ -55,8 +55,3 @@ gui.blamehistoryctx::
linkgit:gitk[1] for the selected commit, when the `Show History
Context` menu item is invoked from 'git gui blame'. If this
variable is set to zero, the whole history is shown.
gui.GCWarning::
Determines whether linkgit:git-gui[1] should prompt for garbage
collection when git detects a large number of loose objects in
the repository. The default value is "true".
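
The removed lines above document a git-gui knob; assuming you run `git gui` at all, turning the prompt off is a one-liner (a sketch, not part of the patch):

----
# Stop git-gui from prompting about loose objects needing garbage collection
$ git config --global gui.GCWarning false
----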


@ -1,15 +1,15 @@
`push.autoSetupRemote`::
If set to `true` assume `--set-upstream` on default push when no
push.autoSetupRemote::
If set to "true" assume `--set-upstream` on default push when no
upstream tracking exists for the current branch; this option
takes effect with `push.default` options `simple`, `upstream`,
and `current`. It is useful if by default you want new branches
takes effect with push.default options 'simple', 'upstream',
and 'current'. It is useful if by default you want new branches
to be pushed to the default remote (like the behavior of
`push.default=current`) and you also want the upstream tracking
'push.default=current') and you also want the upstream tracking
to be set. Workflows most likely to benefit from this option are
`simple` central workflows where all branches are expected to
'simple' central workflows where all branches are expected to
have the same name on the remote.
`push.default`::
push.default::
Defines the action `git push` should take if no refspec is
given (whether from the command-line, config, or elsewhere).
Different values are well-suited for
@ -18,28 +18,24 @@
`upstream` is probably what you want. Possible values are:
+
--
`nothing`;;
do not push anything (error out) unless a refspec is
given. This is primarily meant for people who want to
avoid mistakes by always being explicit.
`current`;;
push the current branch to update a branch with the same
name on the receiving end. Works in both central and non-central
workflows.
* `nothing` - do not push anything (error out) unless a refspec is
given. This is primarily meant for people who want to
avoid mistakes by always being explicit.
`upstream`;;
push the current branch back to the branch whose
changes are usually integrated into the current branch (which is
called `@{upstream}`). This mode only makes sense if you are
pushing to the same repository you would normally pull from
(i.e. central workflow).
* `current` - push the current branch to update a branch with the same
name on the receiving end. Works in both central and non-central
workflows.
`tracking`;;
this is a deprecated synonym for `upstream`.
* `upstream` - push the current branch back to the branch whose
changes are usually integrated into the current branch (which is
called `@{upstream}`). This mode only makes sense if you are
pushing to the same repository you would normally pull from
(i.e. central workflow).
`simple`;;
push the current branch with the same name on the remote.
* `tracking` - This is a deprecated synonym for `upstream`.
* `simple` - push the current branch with the same name on the remote.
+
If you are working on a centralized workflow (pushing to the same repository you
pull from, which is typically `origin`), then you need to configure an upstream
@ -48,17 +44,16 @@ branch with the same name.
This mode is the default since Git 2.0, and is the safest option suited for
beginners.
`matching`;;
push all branches having the same name on both ends.
This makes the repository you are pushing to remember the set of
branches that will be pushed out (e.g. if you always push `maint`
and `master` there and no other branches, the repository you push
to will have these two branches, and your local `maint` and
`master` will be pushed there).
* `matching` - push all branches having the same name on both ends.
This makes the repository you are pushing to remember the set of
branches that will be pushed out (e.g. if you always push 'maint'
and 'master' there and no other branches, the repository you push
to will have these two branches, and your local 'maint' and
'master' will be pushed there).
+
To use this mode effectively, you have to make sure _all_ the
branches you would push out are ready to be pushed out before
running `git push`, as the whole point of this mode is to allow you
running 'git push', as the whole point of this mode is to allow you
to push all of the branches in one go. If you usually finish work
on only one branch and push out the result, while other branches are
unfinished, this mode is not for you. Also this mode is not
@ -71,24 +66,24 @@ new default).
--
`push.followTags`::
push.followTags::
If set to true, enable `--follow-tags` option by default. You
may override this configuration at time of push by specifying
`--no-follow-tags`.
`push.gpgSign`::
May be set to a boolean value, or the string `if-asked`. A true
push.gpgSign::
May be set to a boolean value, or the string 'if-asked'. A true
value causes all pushes to be GPG signed, as if `--signed` is
passed to linkgit:git-push[1]. The string `if-asked` causes
passed to linkgit:git-push[1]. The string 'if-asked' causes
pushes to be signed if the server supports it, as if
`--signed=if-asked` is passed to `git push`. A false value may
`--signed=if-asked` is passed to 'git push'. A false value may
override a value from a lower-priority config file. An explicit
command-line flag always overrides this config option.
`push.pushOption`::
push.pushOption::
When no `--push-option=<option>` argument is given from the
command line, `git push` behaves as if each _<option>_ of
this variable is given as `--push-option=<option>`.
command line, `git push` behaves as if each <value> of
this variable is given as `--push-option=<value>`.
+
This is a multi-valued variable, and an empty value can be used in a
higher priority configuration file (e.g. `.git/config` in a
@ -114,26 +109,26 @@ This will result in only b (a and c are cleared).
----
`push.recurseSubmodules`::
May be `check`, `on-demand`, `only`, or `no`, with the same behavior
as that of `push --recurse-submodules`.
If not set, `no` is used by default, unless `submodule.recurse` is
set (in which case a `true` value means `on-demand`).
push.recurseSubmodules::
May be "check", "on-demand", "only", or "no", with the same behavior
as that of "push --recurse-submodules".
If not set, 'no' is used by default, unless 'submodule.recurse' is
set (in which case a 'true' value means 'on-demand').
`push.useForceIfIncludes`::
If set to `true`, it is equivalent to specifying
push.useForceIfIncludes::
If set to "true", it is equivalent to specifying
`--force-if-includes` as an option to linkgit:git-push[1]
in the command line. Adding `--no-force-if-includes` at the
time of push overrides this configuration setting.
`push.negotiate`::
If set to `true`, attempt to reduce the size of the packfile
push.negotiate::
If set to "true", attempt to reduce the size of the packfile
sent by rounds of negotiation in which the client and the
server attempt to find commits in common. If `false`, Git will
server attempt to find commits in common. If "false", Git will
rely solely on the server's ref advertisement to find commits
in common.
`push.useBitmaps`::
If set to `false`, disable use of bitmaps for `git push` even if
`pack.useBitmaps` is `true`, without preventing other git operations
from using bitmaps. Default is `true`.
push.useBitmaps::
If set to "false", disable use of bitmaps for "git push" even if
`pack.useBitmaps` is "true", without preventing other git operations
from using bitmaps. Default is true.
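
A brief sketch combining several of the variables described above (branch and remote names are illustrative only):

----
# Push new branches to a same-named branch and set up tracking automatically
$ git config --global push.default current
$ git config --global push.autoSetupRemote true
# Sign pushes when the server asks for it, and honor --force-if-includes by default
$ git config --global push.gpgSign if-asked
$ git config --global push.useForceIfIncludes true
$ git push --force-with-lease origin topic
----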


@ -1,11 +0,0 @@
replay.refAction::
Specifies the default mode for handling reference updates in
`git replay`. The value can be:
+
--
* `update`: Update refs directly using an atomic transaction (default behavior).
* `print`: Output update-ref commands for pipeline use.
--
+
This setting can be overridden with the `--ref-action` command-line option.
When not configured, `git replay` defaults to `update` mode.
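
These lines exist only on the master side of the comparison and describe an experimental command; under that assumption, a sketch of the `print` mode feeding a separate ref update (`origin/main` and `topic` are placeholders):

----
# Print update-ref commands for the replayed commits and apply them explicitly
$ git -c replay.refAction=print replay --onto origin/main origin/main..topic |
      git update-ref --stdin
----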


@ -1,20 +0,0 @@
`--diff-algorithm=(patience|minimal|histogram|myers)`::
Choose a diff algorithm. The variants are as follows:
+
--
`default`;;
`myers`;;
The basic greedy diff algorithm. Currently, this is the default.
`minimal`;;
Spend extra time to make sure the smallest possible diff is
produced.
`patience`;;
Use "patience diff" algorithm when generating patches.
`histogram`;;
This algorithm extends the patience algorithm to "support
low-occurrence common elements".
--
+
For instance, if you configured the `diff.algorithm` variable to a
non-default value and want to use the default one, then you
have to use `--diff-algorithm=default` option.
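
For instance, the option and its `diff.algorithm` config counterpart can be mixed, with the command line winning (a sketch):

----
# Prefer histogram diffs by default, but fall back to the Myers default for one run
$ git config --global diff.algorithm histogram
$ git diff --diff-algorithm=default HEAD~1
----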


@ -197,7 +197,26 @@ and starts with _<text>_, this algorithm attempts to prevent it from
appearing as a deletion or addition in the output. It uses the "patience
diff" algorithm internally.
include::diff-algorithm-option.adoc[]
`--diff-algorithm=(patience|minimal|histogram|myers)`::
Choose a diff algorithm. The variants are as follows:
+
--
`default`;;
`myers`;;
The basic greedy diff algorithm. Currently, this is the default.
`minimal`;;
Spend extra time to make sure the smallest possible diff is
produced.
`patience`;;
Use "patience diff" algorithm when generating patches.
`histogram`;;
This algorithm extends the patience algorithm to "support
low-occurrence common elements".
--
+
For instance, if you configured the `diff.algorithm` variable to a
non-default value and want to use the default one, then you
have to use `--diff-algorithm=default` option.
`--stat[=<width>[,<name-width>[,<count>]]]`::
Generate a diffstat. By default, as much space as necessary


@ -1,41 +1,41 @@
`--all`::
`--no-all`::
--all::
--no-all::
Fetch all remotes, except for the ones that has the
`remote.<name>.skipFetchAll` configuration variable set.
This overrides the configuration variable `fetch.all`.
`-a`::
`--append`::
-a::
--append::
Append ref names and object names of fetched refs to the
existing contents of `.git/FETCH_HEAD`. Without this
option old data in `.git/FETCH_HEAD` will be overwritten.
`--atomic`::
--atomic::
Use an atomic transaction to update local refs. Either all refs are
updated, or on error, no refs are updated.
`--depth=<depth>`::
--depth=<depth>::
Limit fetching to the specified number of commits from the tip of
each remote branch history. If fetching to a 'shallow' repository
created by `git clone` with `--depth=<depth>` option (see
linkgit:git-clone[1]), deepen or shorten the history to the specified
number of commits. Tags for the deepened commits are not fetched.
`--deepen=<depth>`::
Similar to `--depth`, except it specifies the number of commits
--deepen=<depth>::
Similar to --depth, except it specifies the number of commits
from the current shallow boundary instead of from the tip of
each remote branch history.
`--shallow-since=<date>`::
--shallow-since=<date>::
Deepen or shorten the history of a shallow repository to
include all reachable commits after _<date>_.
include all reachable commits after <date>.
`--shallow-exclude=<ref>`::
--shallow-exclude=<ref>::
Deepen or shorten the history of a shallow repository to
exclude commits reachable from a specified remote branch or tag.
This option can be specified multiple times.
`--unshallow`::
--unshallow::
If the source repository is complete, convert a shallow
repository to a complete one, removing all the limitations
imposed by shallow repositories.
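
A sketch of how the shallow-related options above are typically used over a repository's life (the URL and depths are placeholders):

----
# Start shallow, deepen later, and finally convert to a full clone
$ git clone --depth=1 https://example.com/repo.git && cd repo
$ git fetch --deepen=50 origin
$ git fetch --unshallow origin
----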
@ -43,13 +43,13 @@
If the source repository is shallow, fetch as much as possible so that
the current repository has the same history as the source repository.
`--update-shallow`::
--update-shallow::
By default when fetching from a shallow repository,
`git fetch` refuses refs that require updating
`.git/shallow`. This option updates `.git/shallow` and accepts such
.git/shallow. This option updates .git/shallow and accepts such
refs.
`--negotiation-tip=(<commit>|<glob>)`::
--negotiation-tip=<commit|glob>::
By default, Git will report, to the server, commits reachable
from all local refs to find common commits in an attempt to
reduce the size of the to-be-received packfile. If specified,
@ -69,28 +69,28 @@ See also the `fetch.negotiationAlgorithm` and `push.negotiate`
configuration variables documented in linkgit:git-config[1], and the
`--negotiate-only` option below.
`--negotiate-only`::
--negotiate-only::
Do not fetch anything from the server, and instead print the
ancestors of the provided `--negotiation-tip=` arguments,
ancestors of the provided `--negotiation-tip=*` arguments,
which we have in common with the server.
+
This is incompatible with `--recurse-submodules=(yes|on-demand)`.
This is incompatible with `--recurse-submodules=[yes|on-demand]`.
Internally this is used to implement the `push.negotiate` option, see
linkgit:git-config[1].
`--dry-run`::
--dry-run::
Show what would be done, without making any changes.
`--porcelain`::
--porcelain::
Print the output to standard output in an easy-to-parse format for
scripts. See section OUTPUT in linkgit:git-fetch[1] for details.
+
This is incompatible with `--recurse-submodules=(yes|on-demand)` and takes
This is incompatible with `--recurse-submodules=[yes|on-demand]` and takes
precedence over the `fetch.output` config option.
ifndef::git-pull[]
`--write-fetch-head`::
`--no-write-fetch-head`::
--write-fetch-head::
--no-write-fetch-head::
Write the list of remote refs fetched in the `FETCH_HEAD`
file directly under `$GIT_DIR`. This is the default.
Passing `--no-write-fetch-head` from the command line tells
@ -98,65 +98,64 @@ ifndef::git-pull[]
file is never written.
endif::git-pull[]
`-f`::
`--force`::
-f::
--force::
When 'git fetch' is used with `<src>:<dst>` refspec, it may
refuse to update the local branch as discussed
ifdef::git-pull[]
When `git fetch` is used with `<src>:<dst>` refspec, it may
refuse to update the local branch as discussed
in the _<refspec>_ part of the linkgit:git-fetch[1]
documentation.
in the `<refspec>` part of the linkgit:git-fetch[1]
documentation.
endif::git-pull[]
ifndef::git-pull[]
When `git fetch` is used with `<src>:<dst>` refspec, it may
refuse to update the local branch as discussed in the _<refspec>_ part below.
in the `<refspec>` part below.
endif::git-pull[]
This option overrides that check.
This option overrides that check.
`-k`::
`--keep`::
-k::
--keep::
Keep downloaded pack.
ifndef::git-pull[]
`--multiple`::
Allow several _<repository>_ and _<group>_ arguments to be
specified. No __<refspec>__s may be specified.
--multiple::
Allow several <repository> and <group> arguments to be
specified. No <refspec>s may be specified.
`--auto-maintenance`::
`--no-auto-maintenance`::
`--auto-gc`::
`--no-auto-gc`::
--auto-maintenance::
--no-auto-maintenance::
--auto-gc::
--no-auto-gc::
Run `git maintenance run --auto` at the end to perform automatic
repository maintenance if needed.
repository maintenance if needed. (`--[no-]auto-gc` is a synonym.)
This is enabled by default.
`--write-commit-graph`::
`--no-write-commit-graph`::
--write-commit-graph::
--no-write-commit-graph::
Write a commit-graph after fetching. This overrides the config
setting `fetch.writeCommitGraph`.
endif::git-pull[]
`--prefetch`::
--prefetch::
Modify the configured refspec to place all refs into the
`refs/prefetch/` namespace. See the `prefetch` task in
linkgit:git-maintenance[1].
`-p`::
`--prune`::
-p::
--prune::
Before fetching, remove any remote-tracking references that no
longer exist on the remote. Tags are not subject to pruning
if they are fetched only because of the default tag
auto-following or due to a `--tags` option. However, if tags
auto-following or due to a --tags option. However, if tags
are fetched due to an explicit refspec (either on the command
line or in the remote configuration, for example if the remote
was cloned with the `--mirror` option), then they are also
was cloned with the --mirror option), then they are also
subject to pruning. Supplying `--prune-tags` is a shorthand for
providing the tag refspec.
ifndef::git-pull[]
+
See the PRUNING section below for more details.
`-P`::
`--prune-tags`::
-P::
--prune-tags::
Before fetching, remove any local tags that no longer exist on
the remote if `--prune` is enabled. This option should be used
more carefully, unlike `--prune` it will remove any local
@ -169,17 +168,17 @@ See the PRUNING section below for more details.
endif::git-pull[]
ifndef::git-pull[]
`-n`::
-n::
endif::git-pull[]
`--no-tags`::
--no-tags::
By default, tags that point at objects that are downloaded
from the remote repository are fetched and stored locally.
This option disables this automatic tag following. The default
behavior for a remote may be specified with the `remote.<name>.tagOpt`
behavior for a remote may be specified with the remote.<name>.tagOpt
setting. See linkgit:git-config[1].
ifndef::git-pull[]
`--refetch`::
--refetch::
Instead of negotiating with the server to avoid transferring commits and
associated objects that are already present locally, this option fetches
all objects as a fresh clone would. Use this to reapply a partial clone
@ -188,29 +187,29 @@ ifndef::git-pull[]
object database pack consolidation to remove any duplicate objects.
endif::git-pull[]
`--refmap=<refspec>`::
--refmap=<refspec>::
When fetching refs listed on the command line, use the
specified refspec (can be given more than once) to map the
refs to remote-tracking branches, instead of the values of
`remote.<name>.fetch` configuration variables for the remote
repository. Providing an empty _<refspec>_ to the
`remote.*.fetch` configuration variables for the remote
repository. Providing an empty `<refspec>` to the
`--refmap` option causes Git to ignore the configured
refspecs and rely entirely on the refspecs supplied as
command-line arguments. See section on "Configured Remote-tracking
Branches" for details.
`-t`::
`--tags`::
-t::
--tags::
Fetch all tags from the remote (i.e., fetch remote tags
`refs/tags/*` into local tags with the same name), in addition
to whatever else would otherwise be fetched. Using this
option alone does not subject tags to pruning, even if `--prune`
option alone does not subject tags to pruning, even if --prune
is used (though tags may be pruned anyway if they are also the
destination of an explicit refspec; see `--prune`).
ifndef::git-pull[]
`--recurse-submodules[=(yes|on-demand|no)]`::
Control if and under what conditions new commits of
--recurse-submodules[=(yes|on-demand|no)]::
This option controls if and under what conditions new commits of
submodules should be fetched too. When recursing through submodules,
`git fetch` always attempts to fetch "changed" submodules, that is, a
submodule that has commits that are referenced by a newly fetched
@ -220,19 +219,19 @@ ifndef::git-pull[]
adds a new submodule, that submodule cannot be fetched until it is
cloned e.g. by `git submodule update`.
+
When set to `on-demand`, only changed submodules are fetched. When set
to `yes`, all populated submodules are fetched and submodules that are
both unpopulated and changed are fetched. When set to `no`, submodules
When set to 'on-demand', only changed submodules are fetched. When set
to 'yes', all populated submodules are fetched and submodules that are
both unpopulated and changed are fetched. When set to 'no', submodules
are never fetched.
+
When unspecified, this uses the value of `fetch.recurseSubmodules` if it
is set (see linkgit:git-config[1]), defaulting to `on-demand` if unset.
When this option is used without any value, it defaults to `yes`.
is set (see linkgit:git-config[1]), defaulting to 'on-demand' if unset.
When this option is used without any value, it defaults to 'yes'.
endif::git-pull[]
`-j <n>`::
`--jobs=<n>`::
Parallelize all forms of fetching up to _<n>_ jobs at a time.
-j::
--jobs=<n>::
Number of parallel children to be used for all forms of fetching.
+
If the `--multiple` option was specified, the different remotes will be fetched
in parallel. If multiple submodules are fetched, they will be fetched in
@ -243,12 +242,12 @@ Typically, parallel recursive and multi-remote fetches will be faster. By
default fetches are performed sequentially, not in parallel.
ifndef::git-pull[]
`--no-recurse-submodules`::
--no-recurse-submodules::
Disable recursive fetching of submodules (this has the same effect as
using the `--recurse-submodules=no` option).
endif::git-pull[]
`--set-upstream`::
--set-upstream::
If the remote is fetched successfully, add upstream
(tracking) reference, used by argument-less
linkgit:git-pull[1] and other commands. For more information,
@ -256,57 +255,57 @@ endif::git-pull[]
linkgit:git-config[1].
ifndef::git-pull[]
`--submodule-prefix=<path>`::
Prepend _<path>_ to paths printed in informative messages
--submodule-prefix=<path>::
Prepend <path> to paths printed in informative messages
such as "Fetching submodule foo". This option is used
internally when recursing over submodules.
`--recurse-submodules-default=(yes|on-demand)`::
--recurse-submodules-default=[yes|on-demand]::
This option is used internally to temporarily provide a
non-negative default value for the `--recurse-submodules`
non-negative default value for the --recurse-submodules
option. All other methods of configuring fetch's submodule
recursion (such as settings in linkgit:gitmodules[5] and
linkgit:git-config[1]) override this option, as does
specifying `--[no-]recurse-submodules` directly.
specifying --[no-]recurse-submodules directly.
`-u`::
`--update-head-ok`::
By default `git fetch` refuses to update the head which
-u::
--update-head-ok::
By default 'git fetch' refuses to update the head which
corresponds to the current branch. This flag disables the
check. This is purely for the internal use for `git pull`
to communicate with `git fetch`, and unless you are
check. This is purely for the internal use for 'git pull'
to communicate with 'git fetch', and unless you are
implementing your own Porcelain you are not supposed to
use it.
endif::git-pull[]
`--upload-pack <upload-pack>`::
--upload-pack <upload-pack>::
When given, and the repository to fetch from is handled
by `git fetch-pack`, `--exec=<upload-pack>` is passed to
by 'git fetch-pack', `--exec=<upload-pack>` is passed to
the command to specify non-default path for the command
run on the other end.
ifndef::git-pull[]
`-q`::
`--quiet`::
Pass `--quiet` to `git-fetch-pack` and silence any other internally
-q::
--quiet::
Pass --quiet to git-fetch-pack and silence any other internally
used git commands. Progress is not reported to the standard error
stream.
`-v`::
`--verbose`::
-v::
--verbose::
Be verbose.
endif::git-pull[]
`--progress`::
--progress::
Progress status is reported on the standard error stream
by default when it is attached to a terminal, unless `-q`
by default when it is attached to a terminal, unless -q
is specified. This flag forces progress status even if the
standard error stream is not directed to a terminal.
`-o <option>`::
`--server-option=<option>`::
-o <option>::
--server-option=<option>::
Transmit the given string to the server when communicating using
protocol version 2. The given string must not contain a _NUL_ or _LF_
protocol version 2. The given string must not contain a NUL or LF
character. The server's handling of server options, including
unknown ones, is server-specific.
When multiple `--server-option=<option>` are given, they are all
@ -315,23 +314,23 @@ endif::git-pull[]
the values of configuration variable `remote.<name>.serverOption`
are used instead.
`--show-forced-updates`::
--show-forced-updates::
By default, git checks if a branch is force-updated during
fetch. This can be disabled through `fetch.showForcedUpdates`, but
the `--show-forced-updates` option guarantees this check occurs.
fetch. This can be disabled through fetch.showForcedUpdates, but
the --show-forced-updates option guarantees this check occurs.
See linkgit:git-config[1].
`--no-show-forced-updates`::
--no-show-forced-updates::
By default, git checks if a branch is force-updated during
fetch. Pass `--no-show-forced-updates` or set `fetch.showForcedUpdates`
fetch. Pass --no-show-forced-updates or set fetch.showForcedUpdates
to false to skip this check for performance reasons. If used during
`git-pull` the `--ff-only` option will still check for forced updates
'git-pull' the --ff-only option will still check for forced updates
before attempting a fast-forward update. See linkgit:git-config[1].
`-4`::
`--ipv4`::
-4::
--ipv4::
Use IPv4 addresses only, ignoring IPv6 addresses.
`-6`::
`--ipv6`::
-6::
--ipv6::
Use IPv6 addresses only, ignoring IPv4 addresses.
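
Putting a few of the options above together (remote and ref names are only examples):

----
# Prune deleted branches, fetch all tags, and fetch changed submodules in parallel
$ git fetch --prune --tags --recurse-submodules=on-demand --jobs=4 origin
# Ask the server what it has in common with our main branch, without fetching anything
$ git fetch --negotiate-only --negotiation-tip=refs/heads/main origin
----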


@ -162,13 +162,6 @@ Valid <action> for the `--whitespace` option are:
commit creation as the committer date. This allows the
user to lie about the committer date by using the same
value as the author date.
+
WARNING: The history walking machinery assumes that commits have
non-decreasing commit timestamps. You should consider if you really need
to use this option. Then you should only use this option to override the
committer date when applying commits on top of a base which commit is
older (in terms of the commit date) than the oldest patch you are
applying.
--ignore-date::
By default the command records the date from the e-mail


@ -85,8 +85,6 @@ include::blame-options.adoc[]
Ignore whitespace when comparing the parent's version and
the child's to find where the lines came from.
include::diff-algorithm-option.adoc[]
--abbrev=<n>::
Instead of using the default 7+1 hexadecimal digits as the
abbreviated object name, use <m>+1 digits, where <m> is at


@ -509,7 +509,7 @@ ARGUMENT DISAMBIGUATION
-----------------------
When you run `git checkout <something>`, Git tries to guess whether
_<something>_ is intended to be a branch, a commit, or a set of file(s),
`<something>` is intended to be a branch, a commit, or a set of file(s),
and then either switches to that branch or commit, or restores the
specified files.
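
When the guess could go either way, spelling the intent out with `--` removes the ambiguity; a sketch with a name that exists both as a branch and as a file:

----
# "topic" is both a branch and a path in the worktree: be explicit
$ git checkout topic --      # switch to the branch
$ git checkout -- topic      # restore the file instead
----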


@ -146,8 +146,7 @@ See linkgit:git-rebase[1] for details.
linkgit:git-status[1] for details. Implies `--dry-run`.
`--branch`::
Show the branch and tracking info even in short-format. See
linkgit:git-status[1] for details.
Show the branch and tracking info even in short-format.
`--porcelain`::
When doing a dry-run, give the output in a porcelain-ready
@ -155,13 +154,12 @@ See linkgit:git-rebase[1] for details.
`--dry-run`.
`--long`::
When doing a dry-run, give the output in the long-format. This
is the default output of linkgit:git-status[1]. Implies
`--dry-run`.
When doing a dry-run, give the output in the long-format.
Implies `--dry-run`.
`-z`::
`--null`::
When showing `short` or `porcelain` linkgit:git-status[1] output, print the
When showing `short` or `porcelain` status output, print the
filename verbatim and terminate the entries with _NUL_, instead of _LF_.
If no format is given, implies the `--porcelain` output format.
Without the `-z` option, filenames with "unusual" characters are


@ -66,26 +66,15 @@ fast-import stream! This option is enabled automatically for
remote-helpers that use the `import` capability, as they are
already trusted to run their own code.
`--signed-tags=(verbatim|warn-verbatim|warn-strip|strip|abort)`::
Specify how to handle signed tags. Behaves in the same way as
the `--signed-commits=<mode>` below, except that the
`strip-if-invalid` mode is not yet supported. Like for signed
commits, the default mode is `verbatim`.
--signed-tags=(verbatim|warn-verbatim|warn-strip|strip|abort)::
Specify how to handle signed tags. Behaves in the same way
as the same option in linkgit:git-fast-export[1], except that
default is 'verbatim' (instead of 'abort').
`--signed-commits=<mode>`::
Specify how to handle signed commits. The following <mode>s
are supported:
+
* `verbatim`, which is the default, will silently import commit
signatures.
* `warn-verbatim` will import them, but will display a warning.
* `abort` will make this program die when encountering a signed
commit.
* `strip` will silently make the commits unsigned.
* `warn-strip` will make them unsigned, but will display a warning.
* `strip-if-invalid` will check signatures and, if they are invalid,
will strip them and display a warning. The validation is performed
in the same way as linkgit:git-verify-commit[1] does it.
--signed-commits=(verbatim|warn-verbatim|warn-strip|strip|abort)::
Specify how to handle signed commits. Behaves in the same way
as the same option in linkgit:git-fast-export[1], except that
default is 'verbatim' (instead of 'abort').
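
A hedged sketch of the option in an export/import pipeline (the target repository path is invented; the `strip-if-invalid` mode is described only on the master side of this comparison):

----
# Re-import a repository, keeping commit signatures but warning about each one
$ git fast-export --signed-commits=verbatim --all |
      git -C ../mirror.git fast-import --signed-commits=warn-verbatim
----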
Options for Frontends
~~~~~~~~~~~~~~~~~~~~~


@ -8,11 +8,11 @@ git-fetch - Download objects and refs from another repository
SYNOPSIS
--------
[synopsis]
git fetch [<options>] [<repository> [<refspec>...]]
git fetch [<options>] <group>
git fetch --multiple [<options>] [(<repository>|<group>)...]
git fetch --all [<options>]
[verse]
'git fetch' [<options>] [<repository> [<refspec>...]]
'git fetch' [<options>] <group>
'git fetch' --multiple [<options>] [(<repository> | <group>)...]
'git fetch' --all [<options>]
DESCRIPTION
@ -20,19 +20,19 @@ DESCRIPTION
Fetch branches and/or tags (collectively, "refs") from one or more
other repositories, along with the objects necessary to complete their
histories. Remote-tracking branches are updated (see the description
of _<refspec>_ below for ways to control this behavior).
of <refspec> below for ways to control this behavior).
By default, any tag that points into the histories being fetched is
also fetched; the effect is to fetch tags that
point at branches that you are interested in. This default behavior
can be changed by using the `--tags` or `--no-tags` options or by
configuring `remote.<name>.tagOpt`. By using a refspec that fetches tags
can be changed by using the --tags or --no-tags options or by
configuring remote.<name>.tagOpt. By using a refspec that fetches tags
explicitly, you can fetch tags that do not point into branches you
are interested in as well.
`git fetch` can fetch from either a single named repository or URL,
or from several repositories at once if _<group>_ is given and
there is a `remotes.<group>` entry in the configuration file.
'git fetch' can fetch from either a single named repository or URL,
or from several repositories at once if <group> is given and
there is a remotes.<group> entry in the configuration file.
(See linkgit:git-config[1]).
When no remote is specified, by default the `origin` remote will be used,
@ -48,15 +48,15 @@ include::fetch-options.adoc[]
include::pull-fetch-param.adoc[]
`--stdin`::
--stdin::
Read refspecs, one per line, from stdin in addition to those provided
as arguments. The "tag _<name>_" format is not supported.
as arguments. The "tag <name>" format is not supported.
include::urls-remotes.adoc[]
[[CRTB]]
CONFIGURED REMOTE-TRACKING BRANCHES
-----------------------------------
CONFIGURED REMOTE-TRACKING BRANCHES[[CRTB]]
-------------------------------------------
You often interact with the same remote repository by
regularly and repeatedly fetching from it. In order to keep track
@ -84,13 +84,13 @@ This configuration is used in two ways:
* When `git fetch` is run with explicit branches and/or tags
to fetch on the command line, e.g. `git fetch origin master`, the
_<refspec>s_ given on the command line determine what are to be
<refspec>s given on the command line determine what are to be
fetched (e.g. `master` in the example,
which is a short-hand for `master:`, which in turn means
"fetch the `master` branch but I do not explicitly say what
"fetch the 'master' branch but I do not explicitly say what
remote-tracking branch to update with it from the command line"),
and the example command will
fetch _only_ the `master` branch. The `remote.<repository>.fetch`
fetch _only_ the 'master' branch. The `remote.<repository>.fetch`
values determine which
remote-tracking branch, if any, is updated. When used in this
way, the `remote.<repository>.fetch` values do not have any
@ -144,9 +144,9 @@ tracking branches that are deleted, but any local tag that doesn't
exist on the remote.
This might not be what you expect, i.e. you want to prune remote
_<name>_, but also explicitly fetch tags from it, so when you fetch
`<name>`, but also explicitly fetch tags from it, so when you fetch
from it you delete all your local tags, most of which may not have
come from the _<name>_ remote in the first place.
come from the `<name>` remote in the first place.
So be careful when using this with a refspec like
`refs/tags/*:refs/tags/*`, or any other refspec which might map
@ -213,11 +213,11 @@ of the form:
<flag> <old-object-id> <new-object-id> <local-reference>
-------------------------------
The status of up-to-date refs is shown only if the `--verbose` option is
The status of up-to-date refs is shown only if the --verbose option is
used.
In compact output mode, specified with configuration variable
fetch.output, if either entire _<from>_ or _<to>_ is found in the
fetch.output, if either entire `<from>` or `<to>` is found in the
other string, it will be substituted with `*` in the other string. For
example, `master -> origin/master` becomes `master -> origin/*`.
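To try the compact mode, one plausible setup, assuming a remote named `origin`, is simply:

----
$ git config fetch.output compact
$ git fetch origin
----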
@ -303,7 +303,7 @@ include::config/fetch.adoc[]
BUGS
----
Using `--recurse-submodules` can only fetch new commits in submodules that are
Using --recurse-submodules can only fetch new commits in submodules that are
present locally e.g. in `$GIT_DIR/modules/`. If the upstream adds a new
submodule, that submodule cannot be fetched until it is cloned e.g. by `git
submodule update`. This is expected to be fixed in a future Git version.


@ -12,7 +12,6 @@ SYNOPSIS
'git maintenance' run [<options>]
'git maintenance' start [--scheduler=<scheduler>]
'git maintenance' (stop|register|unregister) [<options>]
'git maintenance' is-needed [<options>]
DESCRIPTION
@ -85,16 +84,6 @@ The `unregister` subcommand will report an error if the current repository
is not already registered. Use the `--force` option to return success even
when the current repository is not registered.
is-needed::
Check whether maintenance needs to be run without actually running it.
Exits with a 0 status code if maintenance needs to be run, 1 otherwise.
Ideally used with the '--auto' flag.
+
If one or more `--task` options are specified, then those tasks are checked
in that order. Otherwise, the tasks are determined by which
`maintenance.<task>.enabled` config options are true. By default, only
`maintenance.gc.enabled` is true.
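Assuming a Git build that provides the `is-needed` subcommand described above, a sketch of gating maintenance in a script might look like:

----
$ git maintenance is-needed --auto && git maintenance run --auto
----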
TASKS
-----
@ -194,8 +183,6 @@ OPTIONS
in the `gc.auto` config setting, or when the number of pack-files
exceeds the `gc.autoPackLimit` config setting. Not compatible with
the `--schedule` option.
When combined with the `is-needed` subcommand, check if the required
thresholds are met without actually running maintenance.
--schedule::
When combined with the `run` subcommand, run maintenance tasks


@ -8,8 +8,8 @@ git-pull - Fetch from and integrate with another repository or a local branch
SYNOPSIS
--------
[synopsis]
git pull [<options>] [<repository> [<refspec>...]]
[verse]
'git pull' [<options>] [<repository> [<refspec>...]]
DESCRIPTION
@ -37,13 +37,13 @@ You can also set the configuration options `pull.rebase`, `pull.squash`,
or `pull.ff` with your preferred behaviour.
If there's a merge conflict during the merge or rebase that you don't
want to handle, you can safely abort it with `git merge --abort` or
`git rebase --abort`.
want to handle, you can safely abort it with `git merge --abort` or `git
--rebase abort`.
OPTIONS
-------
_<repository>_::
<repository>::
The "remote" repository to pull from. This can be either
a URL (see the section <<URLS,GIT URLS>> below) or the name
of a remote (see the section <<REMOTES,REMOTES>> below).
@ -52,29 +52,29 @@ Defaults to the configured upstream for the current branch, or `origin`.
See <<UPSTREAM-BRANCHES,UPSTREAM BRANCHES>> below for more on how to
configure upstreams.
_<refspec>_::
<refspec>::
Which branch or other reference(s) to fetch and integrate into the
current branch, for example `main` in `git pull origin main`.
Defaults to the configured upstream for the current branch.
+
This can be a branch, tag, or other collection of reference(s).
See <<fetch-refspec,_<refspec>_>> below under "Options related to fetching"
See <<fetch-refspec,<refspec>>> below under "Options related to fetching"
for the full syntax, and <<DEFAULT-BEHAVIOUR,DEFAULT BEHAVIOUR>> below
for how `git pull` uses this argument to determine which remote branch
to integrate.
`-q`::
`--quiet`::
-q::
--quiet::
This is passed to both the underlying git-fetch, to squelch reporting
during transfer, and the underlying git-merge, to squelch output during
merging.
`-v`::
`--verbose`::
Pass `--verbose` to git-fetch and git-merge.
-v::
--verbose::
Pass --verbose to git-fetch and git-merge.
`--recurse-submodules[=(yes|on-demand|no)]`::
`--no-recurse-submodules`::
--recurse-submodules[=(yes|on-demand|no)]::
--no-recurse-submodules::
This option controls if new commits of populated submodules should
be fetched, and if the working trees of active submodules should be
updated, too (see linkgit:git-fetch[1], linkgit:git-config[1] and
@ -91,20 +91,21 @@ Options related to merging
include::merge-options.adoc[]
`-r`::
`--rebase[=(true|merges|false|interactive)]`::
`true`;; rebase the current branch on top of the upstream
-r::
--rebase[=(false|true|merges|interactive)]::
When true, rebase the current branch on top of the upstream
branch after fetching. If there is a remote-tracking branch
corresponding to the upstream branch and the upstream branch
was rebased since last fetched, the rebase uses that information
to avoid rebasing non-local changes. This is the default.
`merges`;; rebase using `git rebase --rebase-merges` so that
to avoid rebasing non-local changes.
+
When set to `merges`, rebase using `git rebase --rebase-merges` so that
the local merge commits are included in the rebase (see
linkgit:git-rebase[1] for details).
`false`;; merge the upstream branch into the current branch.
`interactive`;; enable the interactive mode of rebase.
+
When false, merge the upstream branch into the current branch.
+
When `interactive`, enable the interactive mode of rebase.
+
See `pull.rebase`, `branch.<name>.rebase` and `branch.autoSetupRebase` in
linkgit:git-config[1] if you want to make `git pull` always use
@ -116,8 +117,8 @@ It rewrites history, which does not bode well when you
published that history already. Do *not* use this option
unless you have read linkgit:git-rebase[1] carefully.
`--no-rebase`::
This is shorthand for `--rebase=false`.
--no-rebase::
This is shorthand for --rebase=false.
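As a hedged example of picking a default reconciliation method, assuming a remote named `origin` with a `main` branch:

----
$ git config pull.rebase merges   # keep local merge commits when rebasing on pull
$ git pull origin main
----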
Options related to fetching
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -177,7 +178,7 @@ uses the refspec from the configuration or
rules apply:
. If `branch.<name>.merge` configuration for the current
branch _<name>_ exists, that is the name of the branch at the
branch `<name>` exists, that is the name of the branch at the
remote site that is merged.
. If the refspec is a globbing one, nothing is merged.
@ -197,9 +198,9 @@ $ git pull
$ git pull origin
------------------------------------------------
+
Normally the branch merged in is the `HEAD` of the remote repository,
but the choice is determined by the `branch.<name>.remote` and
`branch.<name>.merge` options; see linkgit:git-config[1] for details.
Normally the branch merged in is the HEAD of the remote repository,
but the choice is determined by the branch.<name>.remote and
branch.<name>.merge options; see linkgit:git-config[1] for details.
* Merge into the current branch the remote branch `next`:
+
@ -207,7 +208,7 @@ but the choice is determined by the `branch.<name>.remote` and
$ git pull origin next
------------------------------------------------
+
This leaves a copy of `next` temporarily in `FETCH_HEAD`, and
This leaves a copy of `next` temporarily in FETCH_HEAD, and
updates the remote-tracking branch `origin/next`.
The same can be done by invoking fetch and merge:
+
@ -218,14 +219,14 @@ $ git merge origin/next
If you tried a pull which resulted in complex conflicts and
would want to start over, you can recover with `git reset`.
would want to start over, you can recover with 'git reset'.
include::transfer-data-leaks.adoc[]
BUGS
----
Using `--recurse-submodules` can only fetch new commits in already checked
Using --recurse-submodules can only fetch new commits in already checked
out submodules right now. When e.g. upstream added a new submodule in the
just fetched commits of the superproject the submodule itself cannot be
fetched, making it impossible to check out that submodule later without


@ -8,13 +8,13 @@ git-push - Update remote refs along with associated objects
SYNOPSIS
--------
[synopsis]
git push [--all | --branches | --mirror | --tags] [--follow-tags] [--atomic] [-n | --dry-run] [--receive-pack=<git-receive-pack>]
[--repo=<repository>] [-f | --force] [-d | --delete] [--prune] [-q | --quiet] [-v | --verbose]
[-u | --set-upstream] [-o <string> | --push-option=<string>]
[--[no-]signed | --signed=(true|false|if-asked)]
[--force-with-lease[=<refname>[:<expect>]] [--force-if-includes]]
[--no-verify] [<repository> [<refspec>...]]
[verse]
'git push' [--all | --branches | --mirror | --tags] [--follow-tags] [--atomic] [-n | --dry-run] [--receive-pack=<git-receive-pack>]
[--repo=<repository>] [-f | --force] [-d | --delete] [--prune] [-q | --quiet] [-v | --verbose]
[-u | --set-upstream] [-o <string> | --push-option=<string>]
[--[no-]signed|--signed=(true|false|if-asked)]
[--force-with-lease[=<refname>[:<expect>]] [--force-if-includes]]
[--no-verify] [<repository> [<refspec>...]]
DESCRIPTION
-----------
@ -35,7 +35,7 @@ To decide which branches, tags, or other refs to push, Git uses
1. The `<refspec>` argument(s) (for example `main` in `git push origin main`)
or the `--all`, `--mirror`, or `--tags` options
2. The `remote.<name>.push` configuration for the repository being pushed to
2. The `remote.*.push` configuration for the repository being pushed to
3. The `push.default` configuration. The default is `push.default=simple`,
which will push to a branch with the same name as the current branch.
See the <<CONFIGURATION,CONFIGURATION>> section below for more on `push.default`.
@ -49,25 +49,25 @@ You can make interesting things happen to a repository
every time you push into it, by setting up 'hooks' there. See
documentation for linkgit:git-receive-pack[1].
[[OPTIONS]]
OPTIONS
-------
_<repository>_::
OPTIONS[[OPTIONS]]
------------------
<repository>::
The "remote" repository that is the destination of a push
operation. This parameter can be either a URL
(see the section <<URLS,GIT URLS>> below) or the name
of a remote (see the section <<REMOTES,REMOTES>> below).
`<refspec>...`::
<refspec>...::
Specify what destination ref to update with what source object.
+
The format for a refspec is `[+]<src>[:<dst>]`, for example `main`,
The format for a refspec is [+]<src>[:<dst>], for example `main`,
`main:other`, or `HEAD^:refs/heads/main`.
+
The _<src>_ is often the name of the local branch to push, but it can be
The `<src>` is often the name of the local branch to push, but it can be
any arbitrary "SHA-1 expression" (see linkgit:gitrevisions[7]).
+
The _<dst>_ determines what ref to update on the remote side. It must be the
The `<dst>` determines what ref to update on the remote side. It must be the
name of a branch, tag, or other ref, not an arbitrary expression.
+
The `+` is optional and does the same thing as `--force`.
@ -78,23 +78,23 @@ and destination, or with a shorter form (for example `main` or
`main:other`). Here are the rules for how refspecs are expanded,
as well as various other special refspec forms:
+
* _<src>_ without a `:<dst>` means to update the same ref as the
_<src>_, unless the `remote.<repository>.push` configuration specifies a
different _<dst>_. For example, if `main` is a branch, then the refspec
* `<src>` without a `:<dst>` means to update the same ref as the
`<src>`, unless the `remote.<repository>.push` configuration specifies a
different <dst>. For example, if `main` is a branch, then the refspec
`main` expands to `main:refs/heads/main`.
* If _<dst>_ unambiguously refers to a ref on the <repository> remote,
* If `<dst>` unambiguously refers to a ref on the <repository> remote,
then expand it to that ref. For example, if `v1.0` is a tag on the
remote, then `HEAD:v1.0` expands to `HEAD:refs/tags/v1.0`.
* If _<src>_ resolves to a ref starting with `refs/heads/` or `refs/tags/`,
* If `<src>` resolves to a ref starting with `refs/heads/` or `refs/tags/`,
then prepend that to <dst>. For example, if `main` is a branch, then
`main:other` expands to `main:refs/heads/other`
* The special refspec `:` (or `+:` to allow non-fast-forward updates)
directs Git to push "matching" branches: for every branch that exists on
the local side, the remote side is updated if a branch of the same name
already exists on the remote side.
* _<src>_ may contain a `*` to indicate a simple pattern match.
* <src> may contain a * to indicate a simple pattern match.
This works like a glob that matches any ref matching the pattern.
There must be only one `*` in both the `<src>` and `<dst>`.
There must be only one * in both the `<src>` and `<dst>`.
It will map refs to the destination by replacing the * with the
contents matched from the source. For example, `refs/heads/*:refs/heads/*`
will push all branches.
@ -102,11 +102,11 @@ as well as various other special refspec forms:
This specifies refs to exclude. A ref will be considered to
match if it matches at least one positive refspec, and does not
match any negative refspec. Negative refspecs can be pattern refspecs.
They must only contain a _<src>_.
They must only contain a `<src>`.
Fully spelled out hex object names are also not supported.
For example, `git push origin 'refs/heads/*' '^refs/heads/dev-*'`
will push all branches except for those starting with `dev-`
* If _<src>_ is empty, it deletes the _<dst>_ ref from the remote
* If `<src>` is empty, it deletes the `<dst>` ref from the remote
repository. For example, `git push origin :dev` will
delete the `dev` branch.
* `tag <tag>` expands to `refs/tags/<tag>:refs/tags/<tag>`.
@ -121,12 +121,12 @@ as well as various other special refspec forms:
Not all updates are allowed: see PUSH RULES below for the details.
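For illustration, a few pushes that exercise the refspec forms described above, assuming a remote named `origin`, a local branch `main`, and a remote branch `old-topic`:

----
$ git push origin main                        # expands to main:refs/heads/main
$ git push origin main:qa                     # push local main to branch qa on the remote
$ git push origin :old-topic                  # delete refs/heads/old-topic from the remote
$ git push origin 'refs/heads/*:refs/heads/*' # push all branches
----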
`--all`::
`--branches`::
--all::
--branches::
Push all branches (i.e. refs under `refs/heads/`); cannot be
used with other <refspec>.
`--prune`::
--prune::
Remove remote branches that don't have a local counterpart. For example
a remote branch `tmp` will be removed if a local branch with the same
name doesn't exist any more. This also respects refspecs, e.g.
@ -134,7 +134,7 @@ Not all updates are allowed: see PUSH RULES below for the details.
make sure that remote `refs/tmp/foo` will be removed if `refs/heads/foo`
doesn't exist.
`--mirror`::
--mirror::
Instead of naming each ref to push, specifies that all
refs under `refs/` (which includes but is not
limited to `refs/heads/`, `refs/remotes/`, and `refs/tags/`)
@ -145,26 +145,26 @@ Not all updates are allowed: see PUSH RULES below for the details.
if the configuration option `remote.<remote>.mirror` is
set.
`-n`::
`--dry-run`::
-n::
--dry-run::
Do everything except actually send the updates.
`--porcelain`::
--porcelain::
Produce machine-readable output. The output status line for each ref
will be tab-separated and sent to stdout instead of stderr. The full
symbolic names of the refs will be given.
`-d`::
`--delete`::
-d::
--delete::
All listed refs are deleted from the remote repository. This is
the same as prefixing all refs with a colon.
`--tags`::
--tags::
All refs under `refs/tags` are pushed, in
addition to refspecs explicitly listed on the command
line.
`--follow-tags`::
--follow-tags::
Push all the refs that would be pushed without this option,
and also push annotated tags in `refs/tags` that are missing
from the remote but are pointing at commit-ish that are
@ -172,34 +172,29 @@ Not all updates are allowed: see PUSH RULES below for the details.
with configuration variable `push.followTags`. For more
information, see `push.followTags` in linkgit:git-config[1].
`--signed`::
`--no-signed`::
`--signed=(true|false|if-asked)`::
--signed::
--no-signed::
--signed=(true|false|if-asked)::
GPG-sign the push request to update refs on the receiving
side, to allow it to be checked by the hooks and/or be
logged. Possible values are:
`false`;;
`--no-signed`;;
no signing will be attempted.
`true`;;
`--signed`;;
the push will fail if the server does not support signed pushes.
`if-asked`;;
sign if and only if the server supports signed pushes. The push
will also fail if the actual call to `gpg --sign` fails. See
linkgit:git-receive-pack[1] for the details on the receiving end.
logged. If `false` or `--no-signed`, no signing will be
attempted. If `true` or `--signed`, the push will fail if the
server does not support signed pushes. If set to `if-asked`,
sign if and only if the server supports signed pushes. The push
will also fail if the actual call to `gpg --sign` fails. See
linkgit:git-receive-pack[1] for the details on the receiving end.
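A hedged example, assuming GPG signing is already configured and the remote is named `origin`; with `if-asked` the push is only signed when the server advertises support:

----
$ git push --signed=if-asked origin main
----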
`--atomic`::
`--no-atomic`::
--atomic::
--no-atomic::
Use an atomic transaction on the remote side if available.
Either all refs are updated, or on error, no refs are updated.
If the server does not support atomic pushes the push will fail.
`-o <option>`::
`--push-option=<option>`::
-o <option>::
--push-option=<option>::
Transmit the given string to the server, which passes them to
the pre-receive as well as the post-receive hook. The given string
must not contain a _NUL_ or _LF_ character.
must not contain a NUL or LF character.
When multiple `--push-option=<option>` are given, they are
all sent to the other side in the order listed on the
command line.
@ -207,22 +202,22 @@ linkgit:git-receive-pack[1] for the details on the receiving end.
line, the values of configuration variable `push.pushOption`
are used instead.
`--receive-pack=<git-receive-pack>`::
`--exec=<git-receive-pack>`::
--receive-pack=<git-receive-pack>::
--exec=<git-receive-pack>::
Path to the 'git-receive-pack' program on the remote
end. Sometimes useful when pushing to a remote
repository over ssh, and you do not have the program in
a directory on the default `$PATH`.
a directory on the default $PATH.
`--force-with-lease`::
`--no-force-with-lease`::
`--force-with-lease=<refname>`::
`--force-with-lease=<refname>:<expect>`::
Usually, `git push` refuses to update a remote ref that is
--force-with-lease::
--no-force-with-lease::
--force-with-lease=<refname>::
--force-with-lease=<refname>:<expect>::
Usually, "git push" refuses to update a remote ref that is
not an ancestor of the local ref used to overwrite it.
+
This option overrides this restriction if the current value of the
remote ref is the expected value. `git push` fails otherwise.
remote ref is the expected value. "git push" fails otherwise.
+
Imagine that you have to rebase what you have already published.
You will have to bypass the "must fast-forward" rule in order to
@ -244,16 +239,16 @@ current value to be the same as the remote-tracking branch we have
for them.
+
`--force-with-lease=<refname>`, without specifying the expected value, will
protect _<refname>_ (alone), if it is going to be updated, by
protect the named ref (alone), if it is going to be updated, by
requiring its current value to be the same as the remote-tracking
branch we have for it.
+
`--force-with-lease=<refname>:<expect>` will protect _<refname>_ (alone),
`--force-with-lease=<refname>:<expect>` will protect the named ref (alone),
if it is going to be updated, by requiring its current value to be
the same as the specified value _<expect>_ (which is allowed to be
the same as the specified value `<expect>` (which is allowed to be
different from the remote-tracking branch we have for the refname,
or we do not even have to have such a remote-tracking branch when
this form is used). If _<expect>_ is the empty string, then the named ref
this form is used). If `<expect>` is the empty string, then the named ref
must not already exist.
+
Note that all forms other than `--force-with-lease=<refname>:<expect>`
@ -261,7 +256,7 @@ that specifies the expected current value of the ref explicitly are
still experimental and their semantics may change as we gain experience
with this feature.
+
`--no-force-with-lease` will cancel all the previous `--force-with-lease` on the
"--no-force-with-lease" will cancel all the previous --force-with-lease on the
command line.
+
A general note on safety: supplying this option without an expected
@ -281,29 +276,23 @@ If your editor or some other system is running `git fetch` in the
background for you, a way to mitigate this is to simply set up another
remote:
+
----
git remote add origin-push $(git config remote.origin.url)
git fetch origin-push
----
git remote add origin-push $(git config remote.origin.url)
git fetch origin-push
+
Now when the background process runs `git fetch origin` the references
on `origin-push` won't be updated, and thus commands like:
+
----
git push --force-with-lease origin-push
----
git push --force-with-lease origin-push
+
Will fail unless you manually run `git fetch origin-push`. This method
is of course entirely defeated by something that runs `git fetch
--all`, in that case you'd need to either disable it or do something
more tedious like:
+
----
git fetch # update 'master' from remote
git tag base master # mark our base point
git rebase -i master # rewrite some commits
git push --force-with-lease=master:base master:master
----
git fetch # update 'master' from remote
git tag base master # mark our base point
git rebase -i master # rewrite some commits
git push --force-with-lease=master:base master:master
+
I.e. create a `base` tag for versions of the upstream code that you've
seen and are willing to overwrite, then rewrite history, and finally
@ -319,26 +308,26 @@ verify if updates from the remote-tracking refs that may have been
implicitly updated in the background are integrated locally before
allowing a forced update.
`-f`::
`--force`::
-f::
--force::
Usually, `git push` will refuse to update a branch that is not an
ancestor of the commit being pushed.
+
This flag disables that check, the other safety checks in PUSH RULES
below, and the checks in `--force-with-lease`. It can cause the remote
below, and the checks in --force-with-lease. It can cause the remote
repository to lose commits; use it with care.
+
Note that `--force` applies to all the refs that are pushed, hence
using it with `push.default` set to `matching` or with multiple push
destinations configured with `remote.<name>.push` may overwrite refs
destinations configured with `remote.*.push` may overwrite refs
other than the current branch (including local refs that are
strictly behind their remote counterpart). To force a push to only
one branch, use a `+` in front of the refspec to push (e.g `git push
origin +master` to force a push to the `master` branch). See the
`<refspec>...` section above for details.
`--force-if-includes`::
`--no-force-if-includes`::
--force-if-includes::
--no-force-if-includes::
Force an update only if the tip of the remote-tracking ref
has been integrated locally.
+
@ -354,78 +343,72 @@ a "no-op".
+
Specifying `--no-force-if-includes` disables this behavior.
`--repo=<repository>`::
This option is equivalent to the _<repository>_ argument. If both
--repo=<repository>::
This option is equivalent to the <repository> argument. If both
are specified, the command-line argument takes precedence.
`-u`::
`--set-upstream`::
-u::
--set-upstream::
For every branch that is up to date or successfully pushed, add
upstream (tracking) reference, used by argument-less
linkgit:git-pull[1] and other commands. For more information,
see `branch.<name>.merge` in linkgit:git-config[1].
`--thin`::
`--no-thin`::
--thin::
--no-thin::
These options are passed to linkgit:git-send-pack[1]. A thin transfer
significantly reduces the amount of sent data when the sender and
receiver share many of the same objects in common. The default is
`--thin`.
`-q`::
`--quiet`::
-q::
--quiet::
Suppress all output, including the listing of updated refs,
unless an error occurs. Progress is not reported to the standard
error stream.
`-v`::
`--verbose`::
-v::
--verbose::
Run verbosely.
`--progress`::
--progress::
Progress status is reported on the standard error stream
by default when it is attached to a terminal, unless `-q`
by default when it is attached to a terminal, unless -q
is specified. This flag forces progress status even if the
standard error stream is not directed to a terminal.
`--no-recurse-submodules`::
`--recurse-submodules=(check|on-demand|only|no)`::
--no-recurse-submodules::
--recurse-submodules=check|on-demand|only|no::
May be used to make sure all submodule commits used by the
revisions to be pushed are available on a remote-tracking branch.
Possible values are:
`check`;;
Git will verify that all submodule commits that
If 'check' is used Git will verify that all submodule commits that
changed in the revisions to be pushed are available on at least one
remote of the submodule. If any commits are missing the push will
be aborted and exit with non-zero status.
`on-demand`;;
be aborted and exit with non-zero status. If 'on-demand' is used
all submodules that changed in the revisions to be pushed will be
pushed. If `on-demand` was not able to push all necessary revisions it will
also be aborted and exit with non-zero status.
`only`;;
all submodules will be pushed while the superproject is left
unpushed.
`no`;;
override the `push.recurseSubmodules` configuration variable when no
submodule recursion is required. Similar to using `--no-recurse-submodules`.
pushed. If on-demand was not able to push all necessary revisions it will
also be aborted and exit with non-zero status. If 'only' is used all
submodules will be pushed while the superproject is left
unpushed. A value of 'no' or using `--no-recurse-submodules` can be used
to override the push.recurseSubmodules configuration variable when no
submodule recursion is required.
+
When using `on-demand` or `only`, if a submodule has a
`push.recurseSubmodules=(on-demand|only)` or `submodule.recurse` configuration,
further recursion will occur. In this case, `only` is treated as `on-demand`.
When using 'on-demand' or 'only', if a submodule has a
"push.recurseSubmodules={on-demand,only}" or "submodule.recurse" configuration,
further recursion will occur. In this case, "only" is treated as "on-demand".
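Two sketches of the submodule checks, assuming a superproject with submodules and a remote named `origin`:

----
$ git push --recurse-submodules=check origin main      # only verify submodule commits are available upstream
$ git push --recurse-submodules=on-demand origin main  # push missing submodule commits first
----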
`--verify`::
`--no-verify`::
--verify::
--no-verify::
Toggle the pre-push hook (see linkgit:githooks[5]). The
default is `--verify`, giving the hook a chance to prevent the
push. With `--no-verify`, the hook is bypassed completely.
default is --verify, giving the hook a chance to prevent the
push. With --no-verify, the hook is bypassed completely.
`-4`::
`--ipv4`::
-4::
--ipv4::
Use IPv4 addresses only, ignoring IPv6 addresses.
`-6`::
`--ipv6`::
-6::
--ipv6::
Use IPv6 addresses only, ignoring IPv4 addresses.
include::urls-remotes.adoc[]
@ -444,16 +427,16 @@ representing the status of a single ref. Each line is of the form:
<flag> <summary> <from> -> <to> (<reason>)
-------------------------------
If `--porcelain` is used, then each line of the output is of the form:
If --porcelain is used, then each line of the output is of the form:
-------------------------------
<flag> \t <from>:<to> \t <summary> (<reason>)
-------------------------------
The status of up-to-date refs is shown only if `--porcelain` or `--verbose`
The status of up-to-date refs is shown only if --porcelain or --verbose
option is used.
_<flag>_::
flag::
A single character indicating the status of the ref:
(space);; for a successfully pushed fast-forward;
`+`;; for a successful forced update;
@ -462,7 +445,7 @@ _<flag>_::
`!`;; for a ref that was rejected or failed to push; and
`=`;; for a ref that was up to date and did not need pushing.
_<summary>_::
summary::
For a successfully pushed ref, the summary shows the old and new
values of the ref in a form suitable for using as an argument to
`git log` (this is `<old>..<new>` in most cases, and
@ -603,7 +586,7 @@ Updating A with the resulting merge commit will fast-forward and your
push will be accepted.
Alternatively, you can rebase your change between X and B on top of A,
with `git pull --rebase`, and push the result back. The rebase will
with "git pull --rebase", and push the result back. The rebase will
create a new commit D that builds the change between X and B on top of
A.
@ -621,12 +604,12 @@ accepted.
There is another common situation where you may encounter non-fast-forward
rejection when you try to push, and it is possible even when you are
pushing into a repository nobody else pushes into. After you push commit
A yourself (in the first picture in this section), replace it with `git
commit --amend` to produce commit B, and you try to push it out, because
A yourself (in the first picture in this section), replace it with "git
commit --amend" to produce commit B, and you try to push it out, because
you forgot that you have pushed A out already. In such a case, and only if
you are certain that nobody in the meantime fetched your earlier commit A
(and started building on top of it), you can run `git push --force` to
overwrite it. In other words, `git push --force` is a method reserved for
(and started building on top of it), you can run "git push --force" to
overwrite it. In other words, "git push --force" is a method reserved for
a case where you do mean to lose history.
@ -644,18 +627,18 @@ EXAMPLES
variable) if it has the same name as the current branch, and
errors out without pushing otherwise.
+
The default behavior of this command when no _<refspec>_ is given can be
The default behavior of this command when no <refspec> is given can be
configured by setting the `push` option of the remote, or the `push.default`
configuration variable.
+
For example, to default to pushing only the current branch to `origin`
use `git config remote.origin.push HEAD`. Any valid _<refspec>_ (like
use `git config remote.origin.push HEAD`. Any valid <refspec> (like
the ones in the examples below) can be configured as the default for
`git push origin`.
`git push origin :`::
Push "matching" branches to `origin`. See
_<refspec>_ in the <<OPTIONS,OPTIONS>> section above for a
<refspec> in the <<OPTIONS,OPTIONS>> section above for a
description of "matching" branches.
`git push origin master`::


@ -87,7 +87,7 @@ of the to-be-rebased branch. However, `ORIG_HEAD` is not guaranteed to still
point to that commit at the end of the rebase if other commands that change
`ORIG_HEAD` (like `git reset`) are used during the rebase. The previous branch
tip, however, is accessible using the reflog of the current branch (i.e. `@{1}`,
see linkgit:gitrevisions[7]).
see linkgit:gitrevisions[7].
TRANSPLANTING A TOPIC BRANCH WITH --ONTO
----------------------------------------
@ -474,13 +474,6 @@ See also INCOMPATIBLE OPTIONS below.
Instead of using the current time as the committer date, use
the author date of the commit being rebased as the committer
date. This option implies `--force-rebase`.
+
WARNING: The history walking machinery assumes that commits have
non-decreasing commit timestamps. You should consider if you really need
to use this option. Then you should only use this option to override the
committer date when rebasing commits on top of a base which commit is
older (in terms of the commit date) than the oldest commit you are
applying (in terms of the author date).
--ignore-date::
--reset-author-date::


@ -77,14 +77,14 @@ to the new separate pack will be written.
Only useful with `--cruft -d`.
--max-cruft-size=<n>::
Override `--max-pack-size` for cruft packs. Inherits the value of
Overrides `--max-pack-size` for cruft packs. Inherits the value of
`--max-pack-size` (if any) by default. See the documentation for
`--max-pack-size` for more details.
--combine-cruft-below-size=<n>::
When generating cruft packs without pruning, only repack
existing cruft packs whose size is strictly less than `<n>`
bytes, which can optionally
existing cruft packs whose size is strictly less than `<n>`,
where `<n>` represents a number of bytes, which can optionally
be suffixed with "k", "m", or "g". Cruft packs whose size is
greater than or equal to `<n>` are left as-is and not repacked.
Useful when you want to avoid repacking large cruft pack(s) in


@ -9,17 +9,16 @@ git-replay - EXPERIMENTAL: Replay commits on a new base, works with bare repos t
SYNOPSIS
--------
[verse]
(EXPERIMENTAL!) 'git replay' ([--contained] --onto <newbase> | --advance <branch>) [--ref-action[=<mode>]] <revision-range>
(EXPERIMENTAL!) 'git replay' ([--contained] --onto <newbase> | --advance <branch>) <revision-range>...
DESCRIPTION
-----------
Takes a range of commits and replays them onto a new location. Leaves
the working tree and the index untouched. By default, updates the
relevant references using an atomic transaction (all refs update or
none). Use `--ref-action=print` to avoid automatic ref updates and
instead get update commands that can be piped to `git update-ref --stdin`
(see the <<output,OUTPUT>> section below).
Takes ranges of commits and replays them onto a new location. Leaves
the working tree and the index untouched, and updates no references.
The output of this command is meant to be used as input to
`git update-ref --stdin`, which would update the relevant branches
(see the OUTPUT section below).
THIS COMMAND IS EXPERIMENTAL. THE BEHAVIOR MAY CHANGE.
@ -30,51 +29,33 @@ OPTIONS
Starting point at which to create the new commits. May be any
valid commit, and not just an existing branch name.
+
When `--onto` is specified, the branch(es) in the revision range will be
updated to point at the new commits, similar to the way `git rebase --update-refs`
updates multiple branches in the affected range.
When `--onto` is specified, the update-ref command(s) in the output will
update the branch(es) in the revision range to point at the new
commits, similar to the way how `git rebase --update-refs` updates
multiple branches in the affected range.
--advance <branch>::
Starting point at which to create the new commits; must be a
branch name.
+
The history is replayed on top of the <branch> and <branch> is updated to
point at the tip of the resulting history. This is different from `--onto`,
which uses the target only as a starting point without updating it.
--contained::
Update all branches that point at commits in
<revision-range>. Requires `--onto`.
--ref-action[=<mode>]::
Control how references are updated. The mode can be:
+
--
* `update` (default): Update refs directly using an atomic transaction.
All refs are updated or none are (all-or-nothing behavior).
* `print`: Output update-ref commands for pipeline use. This is the
traditional behavior where output can be piped to `git update-ref --stdin`.
--
+
The default mode can be configured via the `replay.refAction` configuration variable.
When `--advance` is specified, the update-ref command(s) in the output
will update the branch passed as an argument to `--advance` to point at
the new commits (in other words, this mimics a cherry-pick operation).
<revision-range>::
Range of commits to replay; see "Specifying Ranges" in
linkgit:git-rev-parse[1]. In `--advance <branch>` mode, the
range should have a single tip, so that it's clear to which tip the
advanced <branch> should point.
Range of commits to replay. More than one <revision-range> can
be passed, but in `--advance <branch>` mode, they should have
a single tip, so that it's clear where <branch> should point
to. See "Specifying Ranges" in linkgit:git-rev-parse[1] and the
"Commit Limiting" options below.
include::rev-list-options.adoc[]
[[output]]
OUTPUT
------
By default, or with `--ref-action=update`, this command produces no output on
success, as refs are updated directly using an atomic transaction.
When using `--ref-action=print`, the output is usable as input to
`git update-ref --stdin`. It is of the form:
When there are no conflicts, the output of this command is usable as
input to `git update-ref --stdin`. It is of the form:
update refs/heads/branch1 ${NEW_branch1_HASH} ${OLD_branch1_HASH}
update refs/heads/branch2 ${NEW_branch2_HASH} ${OLD_branch2_HASH}
@ -85,10 +66,6 @@ the shape of the history being replayed. When using `--advance`, the
number of refs updated is always one, but for `--onto`, it can be one
or more (rebasing multiple branches simultaneously is supported).
There is no stderr output on conflicts; see the <<exit-status,EXIT
STATUS>> section below.
[[exit-status]]
EXIT STATUS
-----------
@ -104,14 +81,6 @@ To simply rebase `mybranch` onto `target`:
------------
$ git replay --onto target origin/main..mybranch
------------
The refs are updated atomically and no output is produced on success.
To see what would be updated without actually updating:
------------
$ git replay --ref-action=print --onto target origin/main..mybranch
update refs/heads/mybranch ${NEW_mybranch_HASH} ${OLD_mybranch_HASH}
------------
@ -119,29 +88,33 @@ To cherry-pick the commits from mybranch onto target:
------------
$ git replay --advance target origin/main..mybranch
update refs/heads/target ${NEW_target_HASH} ${OLD_target_HASH}
------------
Note that the first two examples replay the exact same commits and on
top of the exact same new base; they only differ in that the first
updates mybranch to point at the new commits and the second updates
target to point at them.
provides instructions to make mybranch point at the new commits and
the second provides instructions to make target point at them.
What if you have a stack of branches, one depending upon another, and
you'd really like to rebase the whole set?
------------
$ git replay --contained --onto origin/main origin/main..tipbranch
update refs/heads/branch1 ${NEW_branch1_HASH} ${OLD_branch1_HASH}
update refs/heads/branch2 ${NEW_branch2_HASH} ${OLD_branch2_HASH}
update refs/heads/tipbranch ${NEW_tipbranch_HASH} ${OLD_tipbranch_HASH}
------------
All three branches (`branch1`, `branch2`, and `tipbranch`) are updated
atomically.
When calling `git replay`, one does not need to specify a range of
commits to replay using the syntax `A..B`; any range expression will
do:
------------
$ git replay --onto origin/main ^base branch1 branch2 branch3
update refs/heads/branch1 ${NEW_branch1_HASH} ${OLD_branch1_HASH}
update refs/heads/branch2 ${NEW_branch2_HASH} ${OLD_branch2_HASH}
update refs/heads/branch3 ${NEW_branch3_HASH} ${OLD_branch3_HASH}
------------
This will simultaneously rebase `branch1`, `branch2`, and `branch3`,


@ -8,8 +8,8 @@ git-repo - Retrieve information about the repository
SYNOPSIS
--------
[synopsis]
git repo info [--format=(keyvalue|nul) | -z] [--all | <key>...]
git repo structure [--format=(table|keyvalue|nul) | -z]
git repo info [--format=(keyvalue|nul)] [-z] [<key>...]
git repo structure [--format=(table|keyvalue|nul)]
DESCRIPTION
-----------
@ -19,13 +19,13 @@ THIS COMMAND IS EXPERIMENTAL. THE BEHAVIOR MAY CHANGE.
COMMANDS
--------
`info [--format=(keyvalue|nul) | -z] [--all | <key>...]`::
`info [--format=(keyvalue|nul)] [-z] [<key>...]`::
Retrieve metadata-related information about the current repository. Only
the requested data will be returned based on their keys (see "INFO KEYS"
section below).
+
The values are returned in the same order in which their respective keys were
requested. The `--all` flag requests the values for all the available keys.
requested.
+
The output format can be chosen through the flag `--format`. Two formats are
supported:
@ -44,14 +44,13 @@ supported:
+
`-z` is an alias for `--format=nul`.
`structure [--format=(table|keyvalue|nul) | -z]`::
`structure [--format=(table|keyvalue|nul)]`::
Retrieve statistics about the current repository structure. The
following kinds of information are reported:
+
* Reference counts categorized by type
* Reachable object counts categorized by type
* Total inflated size of reachable objects by type
* Total disk size of reachable objects by type
+
The output format can be chosen through the flag `--format`. Three formats are
supported:
@ -73,8 +72,6 @@ supported:
the delimiter between the key and value instead of '='. Unlike the
`keyvalue` format, values containing "unusual" characters are never
quoted.
+
`-z` is an alias for `--format=nul`.
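Since the command is experimental and the available keys may vary between Git versions, a cautious way to explore it is to ask for everything rather than guess key names:

----
$ git repo info --all
$ git repo structure --format=keyvalue
----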
INFO KEYS
---------


@ -208,7 +208,7 @@ Sending
for your own case. Default is the value of `sendemail.smtpEncryption`.
--smtp-domain=<FQDN>::
Specify the Fully Qualified Domain Name (FQDN) used in the
Specifies the Fully Qualified Domain Name (FQDN) used in the
HELO/EHLO command to the SMTP server. Some servers require the
FQDN to match your IP address. If not set, `git send-email` attempts
to determine your FQDN automatically. Default is the value of
@ -245,7 +245,7 @@ a password is obtained using linkgit:git-credential[1].
Disable SMTP authentication. Short hand for `--smtp-auth=none`.
--smtp-server=<host>::
Specify the outgoing SMTP server to use (e.g.
If set, specifies the outgoing SMTP server to use (e.g.
`smtp.example.com` or a raw IP address). If unspecified, and if
`--sendmail-cmd` is also unspecified, the default is to search
for `sendmail` in `/usr/sbin`, `/usr/lib` and `$PATH` if such a
@ -258,7 +258,7 @@ command names. For those use cases, consider using `--sendmail-cmd`
instead.
--smtp-server-port=<port>::
Specify a port different from the default port (SMTP
Specifies a port different from the default port (SMTP
servers typically listen to smtp port 25, but may also listen to
submission port 587, or the common SSL smtp port 465);
symbolic port names (e.g. `submission` instead of 587)
@ -266,7 +266,7 @@ instead.
`sendemail.smtpServerPort` configuration variable.
--smtp-server-option=<option>::
Specify the outgoing SMTP server option to use.
If set, specifies the outgoing SMTP server option to use.
Default value can be specified by the `sendemail.smtpServerOption`
configuration option.
+
@ -277,7 +277,7 @@ must be used for each option.
--smtp-ssl::
Legacy alias for `--smtp-encryption ssl`.
--smtp-ssl-cert-path <path>::
--smtp-ssl-cert-path::
Path to a store of trusted CA certificates for SMTP SSL/TLS
certificate validation (either a directory that has been processed
by `c_rehash`, or a single file containing one or more PEM format
@ -321,6 +321,7 @@ for instructions.
If disabled with `--no-use-imap-only`, the emails will be sent like usual.
Disabled by default, but the `sendemail.useImapOnly` configuration
variable can be used to enable it.
+
This feature requires setting up `git imap-send`. See linkgit:git-imap-send[1]
for instructions.
@ -346,11 +347,11 @@ Automating
--no-to::
--no-cc::
--no-bcc::
Clear any list of `To:`, `Cc:`, `Bcc:` addresses previously
Clears any list of `To:`, `Cc:`, `Bcc:` addresses previously
set via config.
--no-identity::
Clear the previously read value of `sendemail.identity` set
Clears the previously read value of `sendemail.identity` set
via config, if any.
--to-cmd=<command>::
@ -509,12 +510,12 @@ have been specified, in which case default to `compose`.
Currently, validation means the following:
+
--
* Invoke the sendemail-validate hook if present (see linkgit:githooks[5]).
* Warn of patches that contain lines longer than
998 characters unless a suitable transfer encoding
(`auto`, `base64`, or `quoted-printable`) is used;
this is due to SMTP limits as described by
https://www.ietf.org/rfc/rfc5322.txt.
* Invoke the sendemail-validate hook if present (see linkgit:githooks[5]).
* Warn of patches that contain lines longer than
998 characters unless a suitable transfer encoding
(`auto`, `base64`, or `quoted-printable`) is used;
this is due to SMTP limits as described by
https://www.ietf.org/rfc/rfc5322.txt.
--
+
Default is the value of `sendemail.validate`; if this is not set,


@ -104,7 +104,7 @@ associated with a new unborn branch named _<branch>_ (after
passed to the command. In the event the repository has a remote and
`--guess-remote` is used, but no remote or local branches exist, then the
command fails with a warning reminding the user to fetch from their remote
first (or override by using `-f`/`--force`).
first (or override by using `-f/--force`).
`list`::


@ -223,7 +223,7 @@ Options that take a filename allow a prefix `:(optional)`. For example:
----------------------------
git commit -F :(optional)COMMIT_EDITMSG
# if COMMIT_EDITMSG does not exist, the above is equivalent to
# if COMMIT_EDITMSG does not exist, equivalent to
git commit
----------------------------


@ -1,305 +0,0 @@
gitdatamodel(7)
===============
NAME
----
gitdatamodel - Git's core data model
SYNOPSIS
--------
gitdatamodel
DESCRIPTION
-----------
It's not necessary to understand Git's data model to use Git, but it's
very helpful when reading Git's documentation so that you know what it
means when the documentation says "object", "reference" or "index".
Git's core operations use 4 kinds of data:
1. <<objects,Objects>>: commits, trees, blobs, and tag objects
2. <<references,References>>: branches, tags,
remote-tracking branches, etc
3. <<index,The index>>, also known as the staging area
4. <<reflogs,Reflogs>>: logs of changes to references ("ref log")
[[objects]]
OBJECTS
-------
All of the commits and files in a Git repository are stored as "Git objects".
Git objects never change after they're created, and every object has an ID,
like `1b61de420a21a2f1aaef93e38ecd0e45e8bc9f0a`.
This means that if you have an object's ID, you can always recover its
exact contents as long as the object hasn't been deleted.
Every object has:
[[object-id]]
1. an *ID* (aka "object name"), which is a cryptographic hash of its
type and contents.
It's fast to look up a Git object using its ID.
This is usually represented in hexadecimal, like
`1b61de420a21a2f1aaef93e38ecd0e45e8bc9f0a`.
2. a *type*. There are 4 types of objects:
<<commit,commits>>, <<tree,trees>>, <<blob,blobs>>,
and <<tag-object,tag objects>>.
3. *contents*. The structure of the contents depends on the type.
Here's how each type of object is structured:
[[commit]]
commit::
A commit contains these required fields
(though there are other optional fields):
+
1. The full directory structure of all the files in that version of the
repository and each file's contents, stored as the *<<tree,tree>>* ID
of the commit's top-level directory
2. Its *parent commit ID(s)*. The first commit in a repository has 0 parents,
regular commits have 1 parent, merge commits have 2 or more parents
3. An *author* and the time the commit was authored
4. A *committer* and the time the commit was committed
5. A *commit message*
+
Here's how an example commit is stored:
+
----
tree 1b61de420a21a2f1aaef93e38ecd0e45e8bc9f0a
parent 4ccb6d7b8869a86aae2e84c56523f8705b50c647
author Maya <maya@example.com> 1759173425 -0400
committer Maya <maya@example.com> 1759173425 -0400
Add README
----
+
Like all other objects, commits can never be changed after they're created.
For example, "amending" a commit with `git commit --amend` creates a new
commit with the same parent.
+
Git does not store the diff for a commit: when you ask Git to show
the commit with linkgit:git-show[1], it calculates the diff from its
parent on the fly.
[[tree]]
tree::
A tree is how Git represents a directory.
It can contain files or other trees (which are subdirectories).
It lists, for each item in the tree:
+
1. The *filename*, for example `hello.py`
2. The *file type*, which must be one of these five types:
- *regular file*
- *executable file*
- *symbolic link*
- *directory*
- *gitlink* (for use with submodules)
3. The <<object-id,*object ID*>> with the contents of the file, directory,
or gitlink.
+
For example, this is how a tree containing one directory (`src`) and one file
(`README.md`) is stored:
+
----
100644 blob 8728a858d9d21a8c78488c8b4e70e531b659141f README.md
040000 tree 89b1d2e0495f66d6929f4ff76ff1bb07fc41947d src
----
NOTE: In the output above, Git displays the file type of each tree entry
using a format that's loosely modelled on Unix file modes (`100644` is
"regular file", `100755` is "executable file", `120000` is "symbolic
link", `040000` is "directory", and `160000` is "gitlink"). It also
displays the object's type: `blob` for files and symlinks, `tree` for
directories, and `commit` for gitlinks.
[[blob]]
blob::
A blob object contains a file's contents.
+
When you make a commit, Git stores the full contents of each file that
you changed as a blob.
For example, if you have a commit that changes 2 files in a repository
with 1000 files, that commit will create 2 new blobs, and use the
previous blob ID for the other 998 files.
This means that commits can use relatively little disk space even in a
very large repository.
[[tag-object]]
tag object::
Tag objects contain these required fields
(though there are other optional fields):
+
1. The *ID* of the object it references
2. The *type* of the object it references
3. The *tagger* and tag date
4. A *tag message*, similar to a commit message
Here's how an example tag object is stored:
----
object 750b4ead9c87ceb3ddb7a390e6c7074521797fb3
type commit
tag v1.0.0
tagger Maya <maya@example.com> 1759927359 -0400
Release version 1.0.0
----
NOTE: All of the examples in this section were generated with
`git cat-file -p <object-id>`.
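A quick way to poke at these objects yourself, assuming `HEAD` currently points at a commit:

----
$ git cat-file -t HEAD        # prints "commit"
$ git cat-file -p HEAD^{tree} # shows that commit's top-level tree
----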
[[references]]
REFERENCES
----------
References are a way to give a name to a commit.
It's easier to remember "the changes I'm working on are on the `turtle`
branch" than "the changes are in commit bb69721404348e".
Git often uses "ref" as shorthand for "reference".
References can either refer to:
1. An object ID, usually a <<commit,commit>> ID
2. Another reference. This is called a "symbolic reference"
References are stored in a hierarchy, and Git handles references
differently based on where they are in the hierarchy.
Most references are under `refs/`. Here are the main types:
[[branch]]
branches: `refs/heads/<name>`::
A branch refers to a commit ID.
That commit is the latest commit on the branch.
+
To get the history of commits on a branch, Git will start at the commit
ID the branch references, and then look at the commit's parent(s),
the parent's parent, etc.
[[tag]]
tags: `refs/tags/<name>`::
A tag refers to a commit ID, tag object ID, or other object ID.
There are two types of tags:
1. "Annotated tags", which reference a <<tag-object,tag object>> ID
which contains a tag message
2. "Lightweight tags", which reference a commit, blob, or tree ID
directly
+
Even though branches and tags both refer to a commit ID, Git
treats them very differently.
Branches are expected to change over time: when you make a commit, Git
will update your <<HEAD,current branch>> to point to the new commit.
Tags are usually not changed after they're created.
[[HEAD]]
HEAD: `HEAD`::
`HEAD` is where Git stores your current <<branch,branch>>,
if there is a current branch. `HEAD` can either be:
+
1. A symbolic reference to your current branch, for example `ref:
refs/heads/main` if your current branch is `main`.
2. A direct reference to a commit ID. In this case there is no current branch.
This is called "detached HEAD state", see the DETACHED HEAD section
of linkgit:git-checkout[1] for more.
[[remote-tracking-branch]]
remote-tracking branches: `refs/remotes/<remote>/<branch>`::
A remote-tracking branch refers to a commit ID.
It's how Git stores the last-known state of a branch in a remote
repository. `git fetch` updates remote-tracking branches. When
`git status` says "you're up to date with origin/main", it's looking at
this.
+
`refs/remotes/<remote>/HEAD` is a symbolic reference to the remote's
default branch. This is the branch that `git clone` checks out by default.
[[other-refs]]
Other references::
Git tools may create references anywhere under `refs/`.
For example, linkgit:git-stash[1], linkgit:git-bisect[1],
and linkgit:git-notes[1] all create their own references
in `refs/stash`, `refs/bisect`, etc.
Third-party Git tools may also create their own references.
+
Git may also create references other than `HEAD` at the base of the
hierarchy, like `ORIG_HEAD`.
NOTE: Git may delete objects that aren't "reachable" from any reference
or <<reflogs,reflog>>.
An object is "reachable" if we can find it by following tags to whatever
they tag, commits to their parents or trees, and trees to the trees or
blobs that they contain.
For example, if you amend a commit with `git commit --amend`,
there will no longer be a branch that points at the old commit.
The old commit is recorded in the current branch's <<reflogs,reflog>>,
so it is still "reachable", but when the reflog entry expires it may
become unreachable and get deleted.
Reachable objects will never be deleted.
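To see these references in a real repository, one possible sketch, assuming the current branch is `main`:

----
$ git symbolic-ref HEAD                                          # e.g. refs/heads/main
$ git for-each-ref --format='%(refname) %(objecttype)' refs/heads refs/tags
----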
[[index]]
THE INDEX
---------
The index, also known as the "staging area", is a list of files and
the contents of each file, stored as a <<blob,blob>>.
You can add files to the index or update the contents of a file in the
index with linkgit:git-add[1]. This is called "staging" the file for commit.
Unlike a <<tree,tree>>, the index is a flat list of files.
When you commit, Git converts the list of files in the index to a
directory <<tree,tree>> and uses that tree in the new <<commit,commit>>.
Each index entry has 4 fields:
1. The *file type*, which must be one of:
- *regular file*
- *executable file*
- *symbolic link*
- *gitlink* (for use with submodules)
2. The *<<blob,blob>>* ID of the file,
or (rarely) the *<<commit,commit>>* ID of the submodule
3. The *stage number*, either 0, 1, 2, or 3. This is normally 0, but if
there's a merge conflict there can be multiple versions of the same
filename in the index.
4. The *file path*, for example `src/hello.py`
It's extremely uncommon to look at the index directly: normally you'd
run `git status` to see a list of changes between the index and <<HEAD,HEAD>>.
But you can use `git ls-files --stage` to see the index.
Here's the output of `git ls-files --stage` in a repository with 2 files:
----
100644 8728a858d9d21a8c78488c8b4e70e531b659141f 0 README.md
100644 665c637a360874ce43bf74018768a96d2d4d219a 0 src/hello.py
----
[[reflogs]]
REFLOGS
-------
Every time a branch, remote-tracking branch, or HEAD is updated, Git
updates a log called a "reflog" for that <<references,reference>>.
This means that if you make a mistake and "lose" a commit, you can
generally recover the commit ID by running `git reflog <reference>`.
A reflog is a list of log entries. Each entry has:
1. The *commit ID*
2. *Timestamp* when the change was made
3. *Log message*, for example `pull: Fast-forward`
Reflogs only log changes made in your local repository.
They are not shared with remotes.
You can view a reflog with `git reflog <reference>`.
For example, here's the reflog for a `main` branch which has changed twice:
----
$ git reflog main --date=iso --no-decorate
750b4ea main@{2025-09-29 15:17:05 -0400}: commit: Add README
4ccb6d7 main@{2025-09-29 15:16:48 -0400}: commit (initial): Initial commit
----
GIT
---
Part of the linkgit:git[1] suite


@ -83,25 +83,6 @@ Windows would be the configuration `"C:\Program Files\Vim\gvim.exe" --nofork`,
which quotes the filename with spaces and specifies the `--nofork` option to
avoid backgrounding the process.
[[sign-off]]
Why not have `commit.signoff` and other configuration variables?::
Git intentionally does not (and will not) provide a
configuration variable, such as `commit.signoff`, to
automatically add `--signoff` by default. The reason is to
protect the legal and intentional significance of a sign-off.
If there were more automated and widely publicized ways for
sign-offs to be appended, it would become easier for someone
to argue later that a "Signed-off-by" trailer was just added
out of habit or by automation, without the committer's full
awareness or intent to certify their agreement with the
Developer Certificate of Origin (DCO) or a similar statement.
This could undermine the sign-off's credibility in legal or
contractual situations.
+
There exists `format.signoff`, but that is a historical mistake, and
it is not an excuse to add more mistakes of the same kind on top.
Credentials
-----------


@ -103,14 +103,17 @@ invoked before obtaining the proposed commit log message and
making a commit. Exiting with a non-zero status from this script
causes the `git commit` command to abort before creating a commit.
The default 'pre-commit' hook, when enabled, catches introduction
of lines with trailing whitespaces and aborts the commit when
such a line is found.
All the `git commit` hooks are invoked with the environment
variable `GIT_EDITOR=:` if the command will not bring up an editor
to modify the commit message.
The default 'pre-commit' hook, when enabled, prevents the introduction
of non-ASCII filenames and lines with trailing whitespace. The non-ASCII
check can be turned off by setting the `hooks.allownonascii` config
option to `true`.
The default 'pre-commit' hook, when enabled--and with the
`hooks.allownonascii` config option unset or set to false--prevents
the use of non-ASCII filenames.
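For example, the non-ASCII filename check described above can be turned off
like this (shown only as an illustration):
----
$ git config hooks.allownonascii true
----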
pre-merge-commit
~~~~~~~~~~~~~~~~

View File

@ -443,8 +443,7 @@ If no "want" objects are received, send an error:
TODO: Define error if no "want" lines are requested.
If any "want" object is not reachable, send an error:
When a Git server receives an invalid or malformed `want` line, it
responds with an error message that includes the offending object name.
TODO: Define error if an invalid "want" is requested.
Create an empty list, `s_common`.

View File

@ -297,8 +297,8 @@ This commit is referred to as a "merge commit", or sometimes just a
identified by its <<def_object_name,object name>>. The objects usually
live in `$GIT_DIR/objects/`.
[[def_object_identifier]]object identifier, object ID, oid::
Synonyms for <<def_object_name,object name>>.
[[def_object_identifier]]object identifier (oid)::
Synonym for <<def_object_name,object name>>.
[[def_object_name]]object name::
The unique identifier of an <<def_object,object>>. The

View File

@ -35,7 +35,7 @@ doc_targets += custom_target(
output: 'howto-index.html',
depends: documentation_deps,
install: true,
install_dir: htmldir,
install_dir: get_option('datadir') / 'doc/git-doc',
)
foreach howto : howto_sources
@ -57,6 +57,6 @@ foreach howto : howto_sources
output: fs.stem(howto_stripped.full_path()) + '.html',
depends: documentation_deps,
install: true,
install_dir: htmldir / 'howto',
install_dir: get_option('datadir') / 'doc/git-doc/howto',
)
endforeach

View File

@ -56,7 +56,7 @@ ifdef::git-pull[]
`--ff-only`::
Only update to the new history if there is no divergent local
history. This is the default when no method for reconciling
divergent histories is provided (via the `--rebase` flags).
divergent histories is provided (via the --rebase=* flags).
`--ff`::
`--no-ff`::

View File

@ -193,7 +193,6 @@ manpages = {
'gitcore-tutorial.adoc' : 7,
'gitcredentials.adoc' : 7,
'gitcvs-migration.adoc' : 7,
'gitdatamodel.adoc' : 7,
'gitdiffcore.adoc' : 7,
'giteveryday.adoc' : 7,
'gitfaq.adoc' : 7,
@ -413,7 +412,7 @@ foreach manpage, category : manpages
input: manpage,
output: fs.stem(manpage) + '.html',
install: true,
install_dir: htmldir,
install_dir: get_option('datadir') / 'doc/git-doc',
)
endif
endforeach
@ -424,7 +423,7 @@ if get_option('docs').contains('html')
output: 'docinfo.html',
copy: true,
install: true,
install_dir: htmldir,
install_dir: get_option('datadir') / 'doc/git-doc',
)
configure_file(
@ -432,11 +431,11 @@ if get_option('docs').contains('html')
output: 'docbook-xsl.css',
copy: true,
install: true,
install_dir: htmldir,
install_dir: get_option('datadir') / 'doc/git-doc',
)
install_symlink('index.html',
install_dir: htmldir,
install_dir: get_option('datadir') / 'doc/git-doc',
pointing_to: 'git.html',
)
@ -467,7 +466,7 @@ if get_option('docs').contains('html')
input: 'docbook.xsl',
output: 'user-manual.html',
install: true,
install_dir: htmldir,
install_dir: get_option('datadir') / 'doc/git-doc',
)
articles = [
@ -493,7 +492,7 @@ if get_option('docs').contains('html')
output: fs.stem(article) + '.html',
depends: documentation_deps,
install: true,
install_dir: htmldir,
install_dir: get_option('datadir') / 'doc/git-doc',
)
endforeach

View File

@ -1,20 +1,20 @@
_<repository>_::
<repository>::
The "remote" repository that is the source of a fetch
or pull operation. This parameter can be either a URL
(see the section <<URLS,GIT URLS>> below) or the name
of a remote (see the section <<REMOTES,REMOTES>> below).
ifndef::git-pull[]
_<group>_::
<group>::
A name referring to a list of repositories as the value
of `remotes.<group>` in the configuration file.
of remotes.<group> in the configuration file.
(See linkgit:git-config[1]).
endif::git-pull[]
[[fetch-refspec]]
_<refspec>_::
<refspec>::
Specifies which refs to fetch and which local refs to update.
When no __<refspec>__s appear on the command line, the refs to fetch
When no <refspec>s appear on the command line, the refs to fetch
are read from `remote.<repository>.fetch` variables instead
ifndef::git-pull[]
(see <<CRTB,CONFIGURED REMOTE-TRACKING BRANCHES>> below).
@ -24,18 +24,18 @@ ifdef::git-pull[]
in linkgit:git-fetch[1]).
endif::git-pull[]
+
The format of a _<refspec>_ parameter is an optional plus
`+`, followed by the source _<src>_, followed
by a colon `:`, followed by the destination _<dst>_.
The colon can be omitted when _<dst>_ is empty. _<src>_ is
The format of a <refspec> parameter is an optional plus
`+`, followed by the source <src>, followed
by a colon `:`, followed by the destination <dst>.
The colon can be omitted when <dst> is empty. <src> is
typically a ref, or a glob pattern with a single `*` that is used
to match a set of refs, but it can also be a fully spelled hex object
name.
+
A _<refspec>_ may contain a `*` in its _<src>_ to indicate a simple pattern
A <refspec> may contain a `*` in its <src> to indicate a simple pattern
match. Such a refspec functions like a glob that matches any ref with the
pattern. A pattern _<refspec>_ must have one and only one `*` in both the _<src>_ and
_<dst>_. It will map refs to the destination by replacing the `*` with the
pattern. A pattern <refspec> must have one and only one `*` in both the <src> and
<dst>. It will map refs to the destination by replacing the `*` with the
contents matched from the source.
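As a concrete sketch (the remote name `origin` is only an assumption for the
example), the pattern refspec below maps every branch on the remote into the
remote-tracking namespace, for example `refs/heads/main` to
`refs/remotes/origin/main`:
----
+refs/heads/*:refs/remotes/origin/*
----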
+
If a refspec is prefixed by `^`, it will be interpreted as a negative
@ -45,14 +45,14 @@ considered to match if it matches at least one positive refspec, and does
not match any negative refspec. Negative refspecs can be useful to restrict
the scope of a pattern refspec so that it will not include specific refs.
Negative refspecs can themselves be pattern refspecs. However, they may only
contain a _<src>_ and do not specify a _<dst>_. Fully spelled out hex object
contain a <src> and do not specify a <dst>. Fully spelled out hex object
names are also not supported.
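For instance (the branch name `wip` is hypothetical), a negative refspec can
exclude a single ref from a pattern fetch:
----
$ git fetch origin 'refs/heads/*:refs/remotes/origin/*' '^refs/heads/wip'
----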
+
`tag <tag>` means the same as `refs/tags/<tag>:refs/tags/<tag>`;
it requests fetching everything up to the given tag.
+
The remote ref that matches _<src>_
is fetched, and if _<dst>_ is not an empty string, an attempt
The remote ref that matches <src>
is fetched, and if <dst> is not an empty string, an attempt
is made to update the local ref that matches it.
+
Whether that update is allowed without `--force` depends on the ref
@ -60,7 +60,7 @@ namespace it's being fetched to, the type of object being fetched, and
whether the update is considered to be a fast-forward. Generally, the
same rules apply for fetching as when pushing, see the `<refspec>...`
section of linkgit:git-push[1] for what those are. Exceptions to those
rules particular to `git fetch` are noted below.
rules particular to 'git fetch' are noted below.
+
Until Git version 2.20, and unlike when pushing with
linkgit:git-push[1], any updates to `refs/tags/*` would be accepted
@ -101,19 +101,19 @@ must know this is the expected usage pattern for a branch.
ifdef::git-pull[]
+
[NOTE]
There is a difference between listing multiple _<refspec>_
directly on `git pull` command line and having multiple
There is a difference between listing multiple <refspec>
directly on 'git pull' command line and having multiple
`remote.<repository>.fetch` entries in your configuration
for a _<repository>_ and running a
`git pull` command without any explicit _<refspec>_ parameters.
__<refspec>__s listed explicitly on the command line are always
for a <repository> and running a
'git pull' command without any explicit <refspec> parameters.
<refspec>s listed explicitly on the command line are always
merged into the current branch after fetching. In other words,
if you list more than one remote ref, `git pull` will create
if you list more than one remote ref, 'git pull' will create
an Octopus merge. On the other hand, if you do not list any
explicit _<refspec>_ parameter on the command line, `git pull`
will fetch all the __<refspec>__s it finds in the
explicit <refspec> parameter on the command line, 'git pull'
will fetch all the <refspec>s it finds in the
`remote.<repository>.fetch` configuration and merge
only the first _<refspec>_ found into the current branch.
only the first <refspec> found into the current branch.
This is because making an
Octopus from remote refs is rarely done, while keeping track
of multiple remote heads in one-go by fetching more than one

View File

@ -983,9 +983,7 @@ to name units in KiB, MiB, or GiB. For example, `blob:limit=1k`
is the same as 'blob:limit=1024'.
+
The form `--filter=object:type=(tag|commit|tree|blob)` omits all objects
which are not of the requested type. Note that explicitly provided objects
ignore filters and are always printed unless `--filter-provided-objects` is
also specified.
which are not of the requested type.
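As a usage sketch (the repository and its contents are assumed), these filters
are typically combined with `git rev-list --objects`:
----
$ git rev-list --objects --filter=blob:limit=1k HEAD
$ git rev-list --objects --filter=object:type=tree --filter-provided-objects HEAD
----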
+
The form `--filter=sparse:oid=<blob-ish>` uses a sparse-checkout
specification contained in the blob (or blob-expression) _<blob-ish>_

View File

@ -197,170 +197,6 @@ delete <enlistment>::
This subcommand lets you delete an existing Scalar enlistment from your
local file system, unregistering the repository.
RECOMMENDED CONFIG VALUES
-------------------------
As part of both `scalar clone` and `scalar register`, certain Git config
values are set to optimize for large repositories or cross-platform support.
These options are updated in new Git versions according to the best known
advice for large repositories, and users can get the latest recommendations
by running `scalar reconfigure [--all]`.
This section lists justifications for the config values that are set in the
latest version.
am.keepCR=true::
This setting is important for cross-platform development across Windows
and non-Windows platforms, as it keeps carriage return (`\r`) characters
intact in certain workflows.
commitGraph.changedPaths=true::
This setting helps the background maintenance steps that compute the
serialized commit-graph to also store changed-path Bloom filters. This
accelerates file history commands and allows users to automatically
benefit without running a foreground command.
commitGraph.generationVersion=1::
While the preferred version is 2 for performance reasons, existing users
that had version 1 by default will need special care in upgrading to
version 2. This is likely to change in the future as the upgrade story
solidifies.
core.autoCRLF=false::
This removes the transformation of worktree files to add CRLF line
endings when only LF line endings exist. This is removed for performance
reasons. Repositories that use tools that care about CRLF line endings
should commit the necessary files with those line endings instead.
core.logAllRefUpdates=true::
This enables the reflog on all branches. While this is a performance
cost for large repositories, it is frequently an important data source
for users to get out of bad situations or to seek support from experts.
core.safeCRLF=false::
Similar to `core.autoCRLF=false`, this disables checks around whether
the CRLF conversion is reversible. This is a performance improvement,
but can be dangerous if `core.autoCRLF` is reenabled by the user.
credential.https://dev.azure.com.useHttpPath=true::
This setting enables the `credential.useHttpPath` feature only for web
URLs for Azure DevOps. This is important for users interacting with that
service using multiple organizations and thus multiple credential
tokens.
feature.experimental=false::
This disables the "experimental" optimizations grouped under this
feature config. The expectation is that all valuable optimizations are
also set explicitly by Scalar config, and any differences are
intentional. Notable differences include several bitmap-related config
options which are disabled for client-focused Scalar repos.
feature.manyFiles=false::
This disables the "many files" optimizations grouped under this feature
config. The expectation is that all valuable optimizations are also set
explicitly by Scalar config, and any differences are intentional.
fetch.showForcedUpdates=false::
This disables the check at the end of `git fetch` that notifies the user
if the ref update was a forced update (one where the previous position
is not reachable from the latest position). This check can be very
expensive in large repositories, so is disabled and replaced with an
advice message. Set `advice.fetchShowForcedUpdates=false` to disable
this advice message.
fetch.unpackLimit=1::
This setting prevents Git from unpacking packfiles into loose objects
as they are downloaded from the server. The default limit of 100 was
intended as a way to prevent performance issues from too many packfiles,
but Scalar uses background maintenance to group packfiles and cover them
with a multi-pack-index, removing this issue.
fetch.writeCommitGraph=false::
This config setting was created to help users automatically update their
commit-graph files as they perform fetches. However, this takes time
from foreground fetches and pulls and Scalar uses background maintenance
for this function instead.
gc.auto=0::
This disables automatic garbage collection, since Scalar uses background
maintenance to keep the repository data in good shape.
gui.GCWarning=false::
Since Scalar disables garbage collection by setting `gc.auto=0`, the
`git-gui` tool may start to warn about this setting. Disable this
warning as Scalar's background maintenance configuration makes the
warning irrelevant.
index.skipHash=true::
Disable computing the hash of the index contents as it is being written.
This assists with performance, especially for large index files.
index.threads=true::
This tells Git to automatically detect how many threads it should use
when reading the index due to the default value of `core.preloadIndex`,
which enables parallel index reads. This explicit setting also enables
`index.recordOffsetTable=true` to speed up parallel index reads.
index.version=4::
This index version adds compression to the path names, reducing the size
of the index in a significant way for large repos. This is an important
performance boost.
log.excludeDecoration=refs/prefetch/*::
Since Scalar enables background maintenance with the `incremental`
strategy, this setting avoids polluting `git log` output with refs
stored by the background prefetch operations.
merge.renames=true::
When computing merges in large repos, it is particularly important to
detect renames to maximize the potential for a result that will validate
correctly. Users performing merges locally are more likely to be doing
so because a server-side merge (via pull request or similar) resulted in
conflicts. While this is the default setting, it is set specifically to
override a potential change to `diff.renames` which a user may set for
performance reasons.
merge.stat=false::
This disables a diff output after computing a merge. This improves
performance of `git merge` for large repos while reducing noisy output.
pack.useBitmaps=false::
This disables the use of `.bitmap` files attached to packfiles. Bitmap
files are optimized for server-side use, not client-side use. Scalar
disables this to avoid some performance issues that can occur if a user
accidentally creates `.bitmap` files.
pack.usePathWalk=true::
This enables the `--path-walk` option to `git pack-objects` by default.
This can accelerate the computation and compression of packfiles created
by `git push` and other repack operations.
receive.autoGC=false::
Similar to `gc.auto`, this setting is disabled in preference of
background maintenance.
status.aheadBehind=false::
This disables the ahead/behind calculation that would normally happen
during a `git status` command. This information is frequently ignored by
users but can be expensive to calculate in large repos that receive
thousands of commits per day. The calculation is replaced with an advice
message that can be disabled by disabling the `advice.statusAheadBehind`
config.
The following settings are different based on which platform is in use:
core.untrackedCache=(true|false)::
The untracked cache feature is important for performance benefits on
large repositories, but has demonstrated some bugs on Windows
filesystems. Thus, this is set for other platforms but disabled on
Windows.
http.sslBackend=schannel::
On Windows, the `openssl` backend has some issues with certain types of
remote providers and certificate types. Override the default setting to
avoid these common problems.
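To double-check what is active in a given enlistment, the values can be read
back with `git config` from inside the repository (an illustrative check, not
a Scalar subcommand):
----
$ git config --get core.logAllRefUpdates
true
$ git config --get fetch.showForcedUpdates
false
----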
SEE ALSO
--------
linkgit:git-clone[1], linkgit:git-maintenance[1].

View File

@ -16,7 +16,3 @@ endif::git-commit[]
+
The `--no-signoff` option can be used to countermand an earlier `--signoff`
option on the command line.
+
Git does not (and will not) have a configuration variable to enable
the `--signoff` command line option by default; see the
`commit.signoff` entry in linkgit:gitfaq[7] for more details.

View File

@ -32,7 +32,6 @@ articles = [
'sparse-checkout.adoc',
'sparse-index.adoc',
'trivial-merge.adoc',
'unambiguous-types.adoc',
'unit-tests.adoc',
]
@ -54,7 +53,7 @@ doc_targets += custom_target(
output: 'api-index.html',
depends: documentation_deps,
install: true,
install_dir: htmldir / 'technical',
install_dir: get_option('datadir') / 'doc/git-doc/technical',
)
foreach article : api_docs + articles
@ -64,6 +63,6 @@ foreach article : api_docs + articles
output: fs.stem(article) + '.html',
depends: documentation_deps,
install: true,
install_dir: htmldir / 'technical',
install_dir: get_option('datadir') / 'doc/git-doc/technical',
)
endforeach

View File

@ -1,224 +0,0 @@
= Unambiguous types
This document defines clear, one-to-one mappings between primitive types in C,
Rust (and possibly other languages in the future). Its purpose is to eliminate
ambiguity in type widths, signedness, and binary representation across
platforms and languages.
Most of these mappings are obvious, but there are some nuances and gotchas with
Rust FFI (Foreign Function Interface).
For Git, the only header required to use these unambiguous types in C is
`git-compat-util.h`.
== Boolean types
[cols="1,1", options="header"]
|===
| C Type | Rust Type
| bool^1^ | bool
|===
== Integer types
In C, `<stdint.h>` (or an equivalent) must be included.
[cols="1,1", options="header"]
|===
| C Type | Rust Type
| uint8_t | u8
| uint16_t | u16
| uint32_t | u32
| uint64_t | u64
| int8_t | i8
| int16_t | i16
| int32_t | i32
| int64_t | i64
|===
== Floating-point types
Rust requires IEEE-754 semantics.
In C, that is typically true, but not guaranteed by the standard.
[cols="1,1", options="header"]
|===
| C Type | Rust Type
| float^2^ | f32
| double^2^ | f64
|===
== Size types
These types represent pointer-sized integers and are typically defined in
`<stddef.h>` or an equivalent header.
Size types should be used any time pointer arithmetic is performed, e.g.
indexing an array, describing the number of elements in memory, etc.
[cols="1,1", options="header"]
|===
| C Type | Rust Type
| size_t^3^ | usize
| ptrdiff_t^3^ | isize
|===
== Character types
This is where C and Rust don't have a clean one-to-one mapping.
A C `char` and a Rust `u8` share the same bit width, so any C struct containing
a `char` will have the same size as the corresponding Rust struct using `u8`.
In that sense, such structs are safe to pass over the FFI boundary, because
their fields will be laid out identically. However, beyond bit width, C `char`
has additional semantics and platform-dependent behavior that can cause
problems, as discussed below.
The C language leaves the signedness of `char` implementation-defined. Because
our developer build enables -Wsign-compare, comparison of a value of `char`
type with either signed or unsigned integers may trigger warnings from the
compiler.
Note: Rust's `char` type is an unsigned 32-bit integer that is used to describe
Unicode code points.
=== Notes
^1^ This is only true if stdbool.h (or equivalent) is used. +
^2^ C does not enforce IEEE-754 compatibility, but Rust expects it. If the
platform/arch for C does not follow IEEE-754 then this equivalence does not
hold. Also, it's assumed that `float` is 32 bits and `double` is 64, but
there may be a strange platform/arch where even this isn't true. +
^3^ C also defines uintptr_t, ssize_t and intptr_t, but these types are
discouraged for FFI purposes. For functions like `read()` and `write()` ssize_t
should be cast to a different, and unambiguous, type before being passed over
the FFI boundary. +
== Problems with std::ffi::c_* types in Rust
TL;DR: In practice, Rust's `c_*` types aren't guaranteed to match C types for
all possible C compilers, platforms, or architectures, because Rust only
ensures correctness of C types on officially supported targets. These
definitions have changed over time to match more targets, which means that the
c_* definitions will differ based on which Rust version Git chooses to use.
Current list of safe, Rust side, FFI types in Git: +
* `c_void`
* `CStr`
* `CString`
Even then, they should be used sparingly, and only where the semantics match
exactly.
The std::os::raw::c_* types directly inherit the problems of core::ffi, which
changes over time and seems to make a best guess at the correct definition for
a given platform/target. This probably isn't a problem for all other platforms
that Rust supports currently, but can anyone say that Rust got it right for all
C compilers of all platforms/targets?
To give an example: c_long is defined in
footnote:[https://doc.rust-lang.org/1.63.0/src/core/ffi/mod.rs.html#175-189[c_long in 1.63.0]]
footnote:[https://doc.rust-lang.org/1.89.0/src/core/ffi/primitives.rs.html#135-151[c_long in 1.89.0]]
=== Rust version 1.63.0
```
mod c_long_definition {
cfg_if! {
if #[cfg(all(target_pointer_width = "64", not(windows)))] {
pub type c_long = i64;
pub type NonZero_c_long = crate::num::NonZeroI64;
pub type c_ulong = u64;
pub type NonZero_c_ulong = crate::num::NonZeroU64;
} else {
// The minimal size of `long` in the C standard is 32 bits
pub type c_long = i32;
pub type NonZero_c_long = crate::num::NonZeroI32;
pub type c_ulong = u32;
pub type NonZero_c_ulong = crate::num::NonZeroU32;
}
}
}
```
=== Rust version 1.89.0
```
mod c_long_definition {
crate::cfg_select! {
any(
all(target_pointer_width = "64", not(windows)),
// wasm32 Linux ABI uses 64-bit long
all(target_arch = "wasm32", target_os = "linux")
) => {
pub(super) type c_long = i64;
pub(super) type c_ulong = u64;
}
_ => {
// The minimal size of `long` in the C standard is 32 bits
pub(super) type c_long = i32;
pub(super) type c_ulong = u32;
}
}
}
```
Even for the cases where C types are correctly mapped to Rust types via
std::ffi::c_*, there are still problems. Take c_char, for example: on some
platforms it's u8, on others it's i8.
=== Subtraction underflow in debug mode
The following code will panic in debug on platforms that define c_char as u8,
but won't if it's an i8.
```
let mut x: std::ffi::c_char = 0;
x -= 1;
```
=== Inconsistent shift behavior
`x` will be 0xC0 for platforms that use i8, but will be 0x40 where it's u8.
```
let mut x: std::ffi::c_char = 0x80;
x >>= 1;
```
=== Equality fails to compile on some platforms
The following will not compile on platforms that define c_char as i8, but will
if it's u8. You can cast x, e.g. `assert_eq!(x as u8, b'a');`, but then you get
a warning on platforms that use u8 and a clean compilation where i8 is used.
```
let mut x: std::ffi::c_char = 0x61;
assert_eq!(x, b'a');
```
== Enum types
Rust enum types should not be used as FFI types. Rust enum types are more like
C union types than C enums. For something like:
```
#[repr(C, u8)]
enum Fruit {
Apple,
Banana,
Cherry,
}
```
It's easy enough to make sure the Rust enum matches what C would expect, but
consider a more complex type like:
```
enum HashResult {
SHA1([u8; 20]),
SHA256([u8; 32]),
}
```
The Rust compiler has to add a discriminant to the enum to distinguish between
the variants. The width, location, and values for that discriminant are up to
the Rust compiler and are not ABI stable.

View File

@ -4,7 +4,7 @@ REMOTES[[REMOTES]]
------------------
The name of one of the following can be used instead
of a URL as _<repository>_ argument:
of a URL as `<repository>` argument:
* a remote in the Git configuration file: `$GIT_DIR/config`,
* a file in the `$GIT_DIR/remotes` directory, or
@ -32,8 +32,8 @@ config file would appear like this:
fetch = <refspec>
------------
The _<pushurl>_ is used for pushes only. It is optional and defaults
to _<URL>_. Pushing to a remote affects all defined pushurls or all
The `<pushurl>` is used for pushes only. It is optional and defaults
to `<URL>`. Pushing to a remote affects all defined pushurls or all
defined urls if no pushurls are defined. Fetch, however, will only
fetch from the first defined url if multiple urls are defined.
@ -54,8 +54,8 @@ following format:
------------
`Push:` lines are used by `git push` and
`Pull:` lines are used by `git pull` and `git fetch`.
`Push:` lines are used by 'git push' and
`Pull:` lines are used by 'git pull' and 'git fetch'.
Multiple `Push:` and `Pull:` lines may
be specified for additional branch mappings.
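For example, a `$GIT_DIR/remotes/<name>` file might look like this (the URL
and refspecs are purely illustrative):
------------
URL: https://git.example.com/project.git
Push: refs/heads/main:refs/heads/main
Pull: refs/heads/main:refs/remotes/<name>/main
------------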
@ -72,12 +72,12 @@ This file should have the following format:
<URL>#<head>
------------
_<URL>_ is required; `#<head>` is optional.
`<URL>` is required; `#<head>` is optional.
Depending on the operation, git will use one of the following
refspecs, if you don't provide one on the command line.
_<branch>_ is the name of this file in `$GIT_DIR/branches` and
_<head>_ defaults to `master`.
`<branch>` is the name of this file in `$GIT_DIR/branches` and
`<head>` defaults to `master`.
git fetch uses:
@ -111,7 +111,7 @@ Git defaults to using the upstream branch for remote operations, for example:
'origin/main' have diverged, and have 2 and 3 different commits each
respectively".
The upstream is stored in `.git/config`, in the "`remote`" and "`merge`"
The upstream is stored in `.git/config`, in the "remote" and "merge"
fields. For example, if `main`'s upstream is `origin/main`:
------------

View File

@ -1,6 +1,6 @@
#!/bin/sh
DEF_VER=v2.52.GIT
DEF_VER=v2.52.0-rc1
LF='
'

View File

@ -95,21 +95,11 @@ include shared.mak
# and LDFLAGS appropriately.
#
# Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X,
# have DarwinPorts (which is an old name for MacPorts) installed
# in /opt/local, but don't want GIT to
# have DarwinPorts installed in /opt/local, but don't want GIT to
# link against any libraries installed there. If defined you may
# specify your own (or DarwinPort's) include directories and
# library directories by defining CFLAGS and LDFLAGS appropriately.
#
# Define NO_HOMEBREW if you don't want to use gettext, libiconv and
# msgfmt installed by Homebrew.
#
# Define HOMEBREW_PREFIX if you have Homebrew installed in a non-default
# location on macOS or on Linux and want to use it.
#
# Define USE_HOMEBREW_LIBICONV to link against libiconv installed by
# Homebrew, if present.
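#
# For example (an illustrative invocation, not part of this Makefile), a build
# against a Homebrew installation living in a non-default prefix could be
# started with:
#
#     make HOMEBREW_PREFIX=/custom/homebrew USE_HOMEBREW_LIBICONV=1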
#
# Define NO_APPLE_COMMON_CRYPTO if you are building on Darwin/Mac OS X
# and do not want to use Apple's CommonCrypto library. This allows you
# to provide your own OpenSSL library, for example from MacPorts.
@ -991,7 +981,7 @@ SANITIZE_LEAK =
SANITIZE_ADDRESS =
# For the 'coccicheck' target
SPATCH_INCLUDE_FLAGS = --all-includes $(addprefix -I ,compat ewah refs sha256 trace2 win32 xdiff)
SPATCH_INCLUDE_FLAGS = --all-includes
SPATCH_FLAGS =
SPATCH_TEST_FLAGS =
@ -1211,7 +1201,6 @@ LIB_OBJS += object-file.o
LIB_OBJS += object-name.o
LIB_OBJS += object.o
LIB_OBJS += odb.o
LIB_OBJS += odb/streaming.o
LIB_OBJS += oid-array.o
LIB_OBJS += oidmap.o
LIB_OBJS += oidset.o
@ -1305,6 +1294,7 @@ LIB_OBJS += split-index.o
LIB_OBJS += stable-qsort.o
LIB_OBJS += statinfo.o
LIB_OBJS += strbuf.o
LIB_OBJS += streaming.o
LIB_OBJS += string-list.o
LIB_OBJS += strmap.o
LIB_OBJS += strvec.o
@ -1535,7 +1525,6 @@ CLAR_TEST_SUITES += u-string-list
CLAR_TEST_SUITES += u-strvec
CLAR_TEST_SUITES += u-trailer
CLAR_TEST_SUITES += u-urlmatch-normalization
CLAR_TEST_SUITES += u-utf8-width
CLAR_TEST_PROG = $(UNIT_TEST_BIN)/unit-tests$(X)
CLAR_TEST_OBJS = $(patsubst %,$(UNIT_TEST_DIR)/%.o,$(CLAR_TEST_SUITES))
CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/clar/clar.o
@ -1598,7 +1587,6 @@ SANITIZE_LEAK = YesCompiledWithIt
endif
ifneq ($(filter address,$(SANITIZERS)),)
NO_REGEX = NeededForASAN
NO_MMAP = NeededForASAN
SANITIZE_ADDRESS = YesCompiledWithIt
endif
endif
@ -1702,23 +1690,6 @@ ifeq ($(uname_S),Darwin)
PTHREAD_LIBS =
endif
ifndef NO_HOMEBREW
ifdef HOMEBREW_PREFIX
ifeq ($(shell test -d $(HOMEBREW_PREFIX)/opt/gettext && echo y),y)
BASIC_CFLAGS += -I$(HOMEBREW_PREFIX)/opt/gettext/include
BASIC_LDFLAGS += -L$(HOMEBREW_PREFIX)/opt/gettext/lib
endif
ifeq ($(shell test -x $(HOMEBREW_PREFIX)/opt/gettext/msgfmt && echo y),y)
MSGFMT = $(HOMEBREW_PREFIX)/opt/gettext/msgfmt
endif
ifdef USE_HOMEBREW_LIBICONV
ifeq ($(shell test -d $(HOMEBREW_PREFIX)/opt/libiconv && echo y),y)
ICONVDIR ?= $(HOMEBREW_PREFIX)/opt/libiconv
endif
endif
endif
endif
ifdef NO_LIBGEN_H
COMPAT_CFLAGS += -DNO_LIBGEN_H
COMPAT_OBJS += compat/basename.o
@ -1946,6 +1917,7 @@ ifdef NO_SETENV
endif
ifdef NO_MKDTEMP
COMPAT_CFLAGS += -DNO_MKDTEMP
COMPAT_OBJS += compat/mkdtemp.o
endif
ifdef MKDIR_WO_TRAILING_SLASH
COMPAT_CFLAGS += -DMKDIR_WO_TRAILING_SLASH
@ -2593,7 +2565,7 @@ please_set_SHELL_PATH_to_a_more_modern_shell:
shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
strip: $(PROGRAMS) git$X scalar$X
strip: $(PROGRAMS) git$X
$(STRIP) $(STRIP_OPTS) $^
### Target-specific flags and dependencies
@ -3549,7 +3521,7 @@ else
COCCICHECK_PATCH_MUST_BE_EMPTY_FILES = $(COCCICHECK_PATCHES_INTREE)
endif
coccicheck: $(COCCICHECK_PATCH_MUST_BE_EMPTY_FILES)
! grep ^ $(COCCICHECK_PATCH_MUST_BE_EMPTY_FILES) /dev/null
! grep -q ^ $(COCCICHECK_PATCH_MUST_BE_EMPTY_FILES) /dev/null
# See contrib/coccinelle/README
coccicheck-pending: coccicheck-test

View File

@ -1 +1 @@
Documentation/RelNotes/2.53.0.adoc
Documentation/RelNotes/2.52.0.adoc

apply.c
View File

@ -1640,14 +1640,6 @@ static void record_ws_error(struct apply_state *state,
state->squelch_whitespace_errors < state->whitespace_error)
return;
/*
* line[len] for an incomplete line points at the "\n" at the end
* of patch input line, so "%.*s" would drop the last letter on line;
* compensate for it.
*/
if (result & WS_INCOMPLETE_LINE)
len++;
err = whitespace_error_string(result);
if (state->apply_verbosity > verbosity_silent)
fprintf(stderr, "%s:%d: %s.\n%.*s\n",
@ -1678,35 +1670,6 @@ static void check_old_for_crlf(struct patch *patch, const char *line, int len)
}
/*
* Just saw a single line in a fragment. If it is a part of this hunk
* that is a context " ", an added "+", or a removed "-" line, it may
* be followed by "\\ No newline..." to signal that the last "\n" on
* this line needs to be dropped. Depending on locale settings when
* the patch was produced we don't know what this line would exactly
* say. The only thing we do know is that it begins with "\ ".
* Checking for 12 is just for sanity check; "\ No newline..." would
* be at least that long in any l10n.
*
* Return 0 if the line we saw is not followed by "\ No newline...",
* or length of that line. The caller will use it to skip over the
* "\ No newline..." line.
*/
static int adjust_incomplete(const char *line, int len,
unsigned long size)
{
int nextlen;
if (*line != '\n' && *line != ' ' && *line != '+' && *line != '-')
return 0;
if (size - len < 12 || memcmp(line + len, "\\ ", 2))
return 0;
nextlen = linelen(line + len, size - len);
if (nextlen < 12)
return 0;
return nextlen;
}
/*
* Parse a unified diff. Note that this really needs to parse each
* fragment separately, since the only way to know the difference
@ -1721,7 +1684,6 @@ static int parse_fragment(struct apply_state *state,
{
int added, deleted;
int len = linelen(line, size), offset;
int skip_len = 0;
unsigned long oldlines, newlines;
unsigned long leading, trailing;
@ -1748,22 +1710,6 @@ static int parse_fragment(struct apply_state *state,
len = linelen(line, size);
if (!len || line[len-1] != '\n')
return -1;
/*
* For an incomplete line, skip_len counts the bytes
* on "\\ No newline..." marker line that comes next
* to the current line.
*
* Reduce "len" to drop the newline at the end of
* line[], but add one to "skip_len", which will be
* added back to "len" for the next iteration, to
* compensate.
*/
skip_len = adjust_incomplete(line, len, size);
if (skip_len) {
len--;
skip_len++;
}
switch (*line) {
default:
return -1;
@ -1799,12 +1745,19 @@ static int parse_fragment(struct apply_state *state,
newlines--;
trailing = 0;
break;
}
/* eat the "\\ No newline..." as well, if exists */
if (skip_len) {
len += skip_len;
state->linenr++;
/*
* We allow "\ No newline at end of file". Depending
* on locale settings when the patch was produced we
* don't know what this line looks like. The only
* thing we do know is that it begins with "\ ".
* Checking for 12 is just for sanity check -- any
* l10n of "\ No newline..." is at least that long.
*/
case '\\':
if (len < 12 || memcmp(line, "\\ ", 2))
return -1;
break;
}
}
if (oldlines || newlines)
@ -1815,6 +1768,14 @@ static int parse_fragment(struct apply_state *state,
fragment->leading = leading;
fragment->trailing = trailing;
/*
* If a fragment ends with an incomplete line, we failed to include
* it in the above loop because we hit oldlines == newlines == 0
* before seeing it.
*/
if (12 < size && !memcmp(line, "\\ ", 2))
offset += linelen(line, size);
patch->lines_added += added;
patch->lines_deleted += deleted;
@ -3818,7 +3779,7 @@ static int check_preimage(struct apply_state *state,
if (*ce && !(*ce)->ce_mode)
BUG("ce_mode == 0 for path '%s'", old_name);
if (trust_executable_bit || !S_ISREG(st->st_mode))
if (trust_executable_bit)
st_mode = ce_mode_from_stat(*ce, st->st_mode);
else if (*ce)
st_mode = (*ce)->ce_mode;

View File

@ -12,8 +12,8 @@
#include "tar.h"
#include "archive.h"
#include "odb.h"
#include "odb/streaming.h"
#include "strbuf.h"
#include "streaming.h"
#include "run-command.h"
#include "write-or-die.h"
@ -129,20 +129,22 @@ static void write_trailer(void)
*/
static int stream_blocked(struct repository *r, const struct object_id *oid)
{
struct odb_read_stream *st;
struct git_istream *st;
enum object_type type;
unsigned long sz;
char buf[BLOCKSIZE];
ssize_t readlen;
st = odb_read_stream_open(r->objects, oid, NULL);
st = open_istream(r, oid, &type, &sz, NULL);
if (!st)
return error(_("cannot stream blob %s"), oid_to_hex(oid));
for (;;) {
readlen = odb_read_stream_read(st, buf, sizeof(buf));
readlen = read_istream(st, buf, sizeof(buf));
if (readlen <= 0)
break;
do_write_blocked(buf, readlen);
}
odb_read_stream_close(st);
close_istream(st);
if (!readlen)
finish_record();
return readlen;

View File

@ -10,9 +10,9 @@
#include "gettext.h"
#include "git-zlib.h"
#include "hex.h"
#include "streaming.h"
#include "utf8.h"
#include "odb.h"
#include "odb/streaming.h"
#include "strbuf.h"
#include "userdiff.h"
#include "write-or-die.h"
@ -309,7 +309,7 @@ static int write_zip_entry(struct archiver_args *args,
enum zip_method method;
unsigned char *out;
void *deflated = NULL;
struct odb_read_stream *stream = NULL;
struct git_istream *stream = NULL;
unsigned long flags = 0;
int is_binary = -1;
const char *path_without_prefix = path + args->baselen;
@ -347,11 +347,12 @@ static int write_zip_entry(struct archiver_args *args,
method = ZIP_METHOD_DEFLATE;
if (!buffer) {
stream = odb_read_stream_open(args->repo->objects, oid, NULL);
enum object_type type;
stream = open_istream(args->repo, oid, &type, &size,
NULL);
if (!stream)
return error(_("cannot stream blob %s"),
oid_to_hex(oid));
size = stream->size;
flags |= ZIP_STREAM;
out = NULL;
} else {
@ -428,7 +429,7 @@ static int write_zip_entry(struct archiver_args *args,
ssize_t readlen;
for (;;) {
readlen = odb_read_stream_read(stream, buf, sizeof(buf));
readlen = read_istream(stream, buf, sizeof(buf));
if (readlen <= 0)
break;
crc = crc32(crc, buf, readlen);
@ -438,7 +439,7 @@ static int write_zip_entry(struct archiver_args *args,
buf, readlen);
write_or_die(1, buf, readlen);
}
odb_read_stream_close(stream);
close_istream(stream);
if (readlen)
return readlen;
@ -461,7 +462,7 @@ static int write_zip_entry(struct archiver_args *args,
zstream.avail_out = sizeof(compressed);
for (;;) {
readlen = odb_read_stream_read(stream, buf, sizeof(buf));
readlen = read_istream(stream, buf, sizeof(buf));
if (readlen <= 0)
break;
crc = crc32(crc, buf, readlen);
@ -485,7 +486,7 @@ static int write_zip_entry(struct archiver_args *args,
}
}
odb_read_stream_close(stream);
close_istream(stream);
if (readlen)
return readlen;

attr.c
View File

@ -1064,52 +1064,24 @@ static int path_matches(const char *pathname, int pathlen,
pattern, prefix, pat->patternlen);
}
struct attr_state_queue {
const struct attr_state **items;
size_t alloc, nr;
};
static void attr_state_queue_push(struct attr_state_queue *t,
const struct match_attr *a)
{
for (size_t i = 0; i < a->num_attr; i++) {
ALLOC_GROW(t->items, t->nr + 1, t->alloc);
t->items[t->nr++] = &a->state[i];
}
}
static const struct attr_state *attr_state_queue_pop(struct attr_state_queue *t)
{
return t->nr ? t->items[--t->nr] : NULL;
}
static void attr_state_queue_release(struct attr_state_queue *t)
{
free(t->items);
}
static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem);
static int fill_one(struct all_attrs_item *all_attrs,
const struct match_attr *a, int rem)
{
struct attr_state_queue todo = { 0 };
const struct attr_state *state;
size_t i;
attr_state_queue_push(&todo, a);
while (rem > 0 && (state = attr_state_queue_pop(&todo))) {
const struct git_attr *attr = state->attr;
for (i = a->num_attr; rem > 0 && i > 0; i--) {
const struct git_attr *attr = a->state[i - 1].attr;
const char **n = &(all_attrs[attr->attr_nr].value);
const char *v = state->setto;
const char *v = a->state[i - 1].setto;
if (*n == ATTR__UNKNOWN) {
const struct all_attrs_item *item =
&all_attrs[attr->attr_nr];
*n = v;
rem--;
if (item->macro && item->value == ATTR__TRUE)
attr_state_queue_push(&todo, item->macro);
rem = macroexpand_one(all_attrs, attr->attr_nr, rem);
}
}
attr_state_queue_release(&todo);
return rem;
}
@ -1134,6 +1106,16 @@ static int fill(const char *path, int pathlen, int basename_offset,
return rem;
}
static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem)
{
const struct all_attrs_item *item = &all_attrs[nr];
if (item->macro && item->value == ATTR__TRUE)
return fill_one(all_attrs, item->macro, rem);
else
return rem;
}
/*
* Marks the attributes which are macros based on the attribute stack.
* This prevents having to search through the attribute stack each time

View File

@ -41,7 +41,4 @@
#undef asctime_r
#define asctime_r(t, buf) BANNED(asctime_r)
#undef mktemp
#define mktemp(x) BANNED(mktemp)
#endif /* BANNED_H */

View File

@ -450,20 +450,21 @@ void find_bisection(struct commit_list **commit_list, int *reaches,
clear_commit_weight(&commit_weight);
}
static int register_ref(const struct reference *ref, void *cb_data UNUSED)
static int register_ref(const char *refname, const char *referent UNUSED, const struct object_id *oid,
int flags UNUSED, void *cb_data UNUSED)
{
struct strbuf good_prefix = STRBUF_INIT;
strbuf_addstr(&good_prefix, term_good);
strbuf_addstr(&good_prefix, "-");
if (!strcmp(ref->name, term_bad)) {
if (!strcmp(refname, term_bad)) {
free(current_bad_oid);
current_bad_oid = xmalloc(sizeof(*current_bad_oid));
oidcpy(current_bad_oid, ref->oid);
} else if (starts_with(ref->name, good_prefix.buf)) {
oid_array_append(&good_revs, ref->oid);
} else if (starts_with(ref->name, "skip-")) {
oid_array_append(&skipped_revs, ref->oid);
oidcpy(current_bad_oid, oid);
} else if (starts_with(refname, good_prefix.buf)) {
oid_array_append(&good_revs, oid);
} else if (starts_with(refname, "skip-")) {
oid_array_append(&skipped_revs, oid);
}
strbuf_release(&good_prefix);
@ -1177,11 +1178,14 @@ int estimate_bisect_steps(int all)
return (e < 3 * x) ? n : n - 1;
}
static int mark_for_removal(const struct reference *ref, void *cb_data)
static int mark_for_removal(const char *refname,
const char *referent UNUSED,
const struct object_id *oid UNUSED,
int flag UNUSED, void *cb_data)
{
struct string_list *refs = cb_data;
char *bisect_ref = xstrfmt("refs/bisect%s", ref->name);
string_list_append(refs, bisect_ref);
char *ref = xstrfmt("refs/bisect%s", refname);
string_list_append(refs, ref);
return 0;
}

View File

@ -375,7 +375,7 @@ int validate_branchname(const char *name, struct strbuf *ref)
if (check_branch_ref(ref, name)) {
int code = die_message(_("'%s' is not a valid branch name"), name);
advise_if_enabled(ADVICE_REF_SYNTAX,
_("See 'git help check-ref-format'"));
_("See `man git check-ref-format`"));
exit(code);
}

View File

@ -363,7 +363,10 @@ static int check_and_set_terms(struct bisect_terms *terms, const char *cmd)
return 0;
}
static int inc_nr(const struct reference *ref UNUSED, void *cb_data)
static int inc_nr(const char *refname UNUSED,
const char *referent UNUSED,
const struct object_id *oid UNUSED,
int flag UNUSED, void *cb_data)
{
unsigned int *nr = (unsigned int *)cb_data;
(*nr)++;
@ -551,11 +554,12 @@ finish:
return res;
}
static int add_bisect_ref(const struct reference *ref, void *cb)
static int add_bisect_ref(const char *refname, const char *referent UNUSED, const struct object_id *oid,
int flags UNUSED, void *cb)
{
struct add_bisect_ref_data *data = cb;
add_pending_oid(data->revs, ref->name, ref->oid, data->object_flags);
add_pending_oid(data->revs, refname, oid, data->object_flags);
return 0;
}
@ -1166,9 +1170,12 @@ static int bisect_visualize(struct bisect_terms *terms, int argc,
return run_command(&cmd);
}
static int get_first_good(const struct reference *ref, void *cb_data)
static int get_first_good(const char *refname UNUSED,
const char *referent UNUSED,
const struct object_id *oid,
int flag UNUSED, void *cb_data)
{
oidcpy(cb_data, ref->oid);
oidcpy(cb_data, oid);
return 1;
}

View File

@ -739,8 +739,7 @@ static int git_blame_config(const char *var, const char *value,
ret = git_config_pathname(&str, var, value);
if (ret)
return ret;
if (str)
string_list_insert(&ignore_revs_file_list, str);
string_list_insert(&ignore_revs_file_list, str);
free(str);
return 0;
}
@ -780,19 +779,6 @@ static int git_blame_config(const char *var, const char *value,
}
}
if (!strcmp(var, "diff.algorithm")) {
long diff_algorithm;
if (!value)
return config_error_nonbool(var);
diff_algorithm = parse_algorithm_value(value);
if (diff_algorithm < 0)
return error(_("unknown value for config '%s': %s"),
var, value);
xdl_opts &= ~XDF_DIFF_ALGORITHM_MASK;
xdl_opts |= diff_algorithm;
return 0;
}
if (git_diff_heuristic_config(var, value, cb) < 0)
return -1;
if (userdiff_config(var, value) < 0)
@ -838,38 +824,6 @@ static int blame_move_callback(const struct option *option, const char *arg, int
return 0;
}
static int blame_diff_algorithm_minimal(const struct option *option,
const char *arg, int unset)
{
int *opt = option->value;
BUG_ON_OPT_ARG(arg);
*opt &= ~XDF_DIFF_ALGORITHM_MASK;
if (!unset)
*opt |= XDF_NEED_MINIMAL;
return 0;
}
static int blame_diff_algorithm_callback(const struct option *option,
const char *arg, int unset)
{
int *opt = option->value;
long value = parse_algorithm_value(arg);
BUG_ON_OPT_NEG(unset);
if (value < 0)
return error(_("option diff-algorithm accepts \"myers\", "
"\"minimal\", \"patience\" and \"histogram\""));
*opt &= ~XDF_DIFF_ALGORITHM_MASK;
*opt |= value;
return 0;
}
static int is_a_rev(const char *name)
{
struct object_id oid;
@ -961,16 +915,11 @@ int cmd_blame(int argc,
OPT_BIT('s', NULL, &output_option, N_("suppress author name and timestamp (Default: off)"), OUTPUT_NO_AUTHOR),
OPT_BIT('e', "show-email", &output_option, N_("show author email instead of name (Default: off)"), OUTPUT_SHOW_EMAIL),
OPT_BIT('w', NULL, &xdl_opts, N_("ignore whitespace differences"), XDF_IGNORE_WHITESPACE),
OPT_CALLBACK_F(0, "diff-algorithm", &xdl_opts, N_("<algorithm>"),
N_("choose a diff algorithm"),
PARSE_OPT_NONEG, blame_diff_algorithm_callback),
OPT_STRING_LIST(0, "ignore-rev", &ignore_rev_list, N_("rev"), N_("ignore <rev> when blaming")),
OPT_STRING_LIST(0, "ignore-revs-file", &ignore_revs_file_list, N_("file"), N_("ignore revisions from <file>")),
OPT_BIT(0, "color-lines", &output_option, N_("color redundant metadata from previous line differently"), OUTPUT_COLOR_LINE),
OPT_BIT(0, "color-by-age", &output_option, N_("color lines by age"), OUTPUT_SHOW_AGE_WITH_COLOR),
OPT_CALLBACK_F(0, "minimal", &xdl_opts, NULL,
N_("spend extra cycles to find a better match"),
PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, blame_diff_algorithm_minimal),
OPT_BIT(0, "minimal", &xdl_opts, N_("spend extra cycles to find better match"), XDF_NEED_MINIMAL),
OPT_STRING('S', NULL, &revs_file, N_("file"), N_("use revisions from <file> instead of calling git-rev-list")),
OPT_STRING(0, "contents", &contents_from, N_("file"), N_("use <file>'s contents as the final image")),
OPT_CALLBACK_F('C', NULL, &opt, N_("score"), N_("find line copies within and across files"), PARSE_OPT_OPTARG, blame_copy_callback),

View File

@ -591,7 +591,7 @@ static void copy_or_rename_branch(const char *oldname, const char *newname, int
else {
int code = die_message(_("invalid branch name: '%s'"), oldname);
advise_if_enabled(ADVICE_REF_SYNTAX,
_("See 'git help check-ref-format'"));
_("See `man git check-ref-format`"));
exit(code);
}
}

View File

@ -18,13 +18,13 @@
#include "list-objects-filter-options.h"
#include "parse-options.h"
#include "userdiff.h"
#include "streaming.h"
#include "oid-array.h"
#include "packfile.h"
#include "pack-bitmap.h"
#include "object-file.h"
#include "object-name.h"
#include "odb.h"
#include "odb/streaming.h"
#include "replace-object.h"
#include "promisor-remote.h"
#include "mailmap.h"
@ -95,7 +95,7 @@ static int filter_object(const char *path, unsigned mode,
static int stream_blob(const struct object_id *oid)
{
if (odb_stream_blob_to_fd(the_repository->objects, 1, oid, NULL, 0))
if (stream_blob_to_fd(1, oid, NULL, 0))
die("unable to stream %s to stdout", oid_to_hex(oid));
return 0;
}

View File

@ -1063,9 +1063,11 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
report_tracking(new_branch_info);
}
static int add_pending_uninteresting_ref(const struct reference *ref, void *cb_data)
static int add_pending_uninteresting_ref(const char *refname, const char *referent UNUSED,
const struct object_id *oid,
int flags UNUSED, void *cb_data)
{
add_pending_oid(cb_data, ref->name, ref->oid, UNINTERESTING);
add_pending_oid(cb_data, refname, oid, UNINTERESTING);
return 0;
}
@ -1899,7 +1901,7 @@ static int checkout_main(int argc, const char **argv, const char *prefix,
struct object_id rev;
if (repo_get_oid_mb(the_repository, opts->from_treeish, &rev))
die(_("could not resolve '%s'"), opts->from_treeish);
die(_("could not resolve %s"), opts->from_treeish);
setup_new_branch_info_and_source_tree(&new_branch_info,
opts, &rev,

View File

@ -1617,7 +1617,7 @@ int cmd_clone(int argc,
transport_disconnect(transport);
if (option_dissociate) {
odb_close(the_repository->objects);
close_object_store(the_repository->objects);
dissociate_from_references();
}

View File

@ -261,12 +261,6 @@ struct strbuf_list {
int alloc;
};
/*
* Format the configuration key-value pair (`key_`, `value_`) and
* append it into strbuf `buf`. Returns a negative value on failure,
* 0 on success, 1 on a missing optional value (i.e., telling the
* caller to pretend that <key_,value_> did not exist).
*/
static int format_config(const struct config_display_options *opts,
struct strbuf *buf, const char *key_,
const char *value_, const struct key_value_info *kvi)
@ -305,10 +299,7 @@ static int format_config(const struct config_display_options *opts,
char *v;
if (git_config_pathname(&v, key_, value_) < 0)
return -1;
if (v)
strbuf_addstr(buf, v);
else
return 1; /* :(optional)no-such-file */
strbuf_addstr(buf, v);
free((char *)v);
} else if (opts->type == TYPE_EXPIRY_DATE) {
timestamp_t t;
@ -353,7 +344,6 @@ static int collect_config(const char *key_, const char *value_,
struct collect_config_data *data = cb;
struct strbuf_list *values = data->values;
const struct key_value_info *kvi = ctx->kvi;
int status;
if (!(data->get_value_flags & GET_VALUE_KEY_REGEXP) &&
strcmp(key_, data->key))
@ -371,15 +361,8 @@ static int collect_config(const char *key_, const char *value_,
ALLOC_GROW(values->items, values->nr + 1, values->alloc);
strbuf_init(&values->items[values->nr], 0);
status = format_config(data->display_opts, &values->items[values->nr++],
key_, value_, kvi);
if (status < 0)
return status;
if (status) {
strbuf_release(&values->items[--values->nr]);
status = 0;
}
return status;
return format_config(data->display_opts, &values->items[values->nr++],
key_, value_, kvi);
}
static int get_value(const struct config_location_options *opts,
@ -455,23 +438,15 @@ static int get_value(const struct config_location_options *opts,
if (!values.nr && display_opts->default_value) {
struct key_value_info kvi = KVI_INIT;
struct strbuf *item;
int status;
kvi_from_param(&kvi);
ALLOC_GROW(values.items, values.nr + 1, values.alloc);
item = &values.items[values.nr++];
strbuf_init(item, 0);
status = format_config(display_opts, item, key_,
display_opts->default_value, &kvi);
if (status < 0)
if (format_config(display_opts, item, key_,
display_opts->default_value, &kvi) < 0)
die(_("failed to format default config value: %s"),
display_opts->default_value);
if (status) {
/* default was a missing optional value */
values.nr--;
strbuf_release(item);
}
}
ret = !values.nr;
@ -739,13 +714,11 @@ static int get_urlmatch(const struct config_location_options *opts,
for_each_string_list_item(item, &values) {
struct urlmatch_current_candidate_value *matched = item->util;
struct strbuf buf = STRBUF_INIT;
int status;
status = format_config(&display_opts, &buf, item->string,
matched->value_is_null ? NULL : matched->value.buf,
&matched->kvi);
if (!status)
fwrite(buf.buf, 1, buf.len, stdout);
format_config(&display_opts, &buf, item->string,
matched->value_is_null ? NULL : matched->value.buf,
&matched->kvi);
fwrite(buf.buf, 1, buf.len, stdout);
strbuf_release(&buf);
strbuf_release(&matched->value);
@ -1012,7 +985,7 @@ static int cmd_config_set(int argc, const char **argv, const char *prefix,
argv[0], comment, value);
if (ret == CONFIG_NOTHING_SET)
error(_("cannot overwrite multiple values with a single value\n"
" Use --value=<pattern>, --append or --all to change %s."), argv[0]);
" Use a regexp, --add or --replace-all to change %s."), argv[0]);
}
location_options_release(&location_opts);
@ -1030,8 +1003,8 @@ static int cmd_config_unset(int argc, const char **argv, const char *prefix,
struct option opts[] = {
CONFIG_LOCATION_OPTIONS(location_opts),
OPT_GROUP(N_("Filter")),
OPT_BIT(0, "all", &flags, N_("unset all multi-valued config options"), CONFIG_FLAGS_MULTI_REPLACE),
OPT_STRING(0, "value", &value_pattern, N_("pattern"), N_("unset multi-valued config options with matching values")),
OPT_BIT(0, "all", &flags, N_("replace multi-valued config option with new value"), CONFIG_FLAGS_MULTI_REPLACE),
OPT_STRING(0, "value", &value_pattern, N_("pattern"), N_("show config with values matching the pattern")),
OPT_BIT(0, "fixed-value", &flags, N_("use string equality when comparing values to value pattern"), CONFIG_FLAGS_FIXED_VALUE),
OPT_END(),
};

View File

@ -112,13 +112,13 @@ static int replace_name(struct commit_name *e,
if (!e->tag) {
t = lookup_tag(the_repository, &e->oid);
if (!t || parse_tag(the_repository, t))
if (!t || parse_tag(t))
return 1;
e->tag = t;
}
t = lookup_tag(the_repository, oid);
if (!t || parse_tag(the_repository, t))
if (!t || parse_tag(t))
return 0;
*tag = t;
@ -154,19 +154,20 @@ static void add_to_known_names(const char *path,
}
}
static int get_name(const struct reference *ref, void *cb_data UNUSED)
static int get_name(const char *path, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
int is_tag = 0;
struct object_id peeled;
int is_annotated, prio;
const char *path_to_match = NULL;
if (skip_prefix(ref->name, "refs/tags/", &path_to_match)) {
if (skip_prefix(path, "refs/tags/", &path_to_match)) {
is_tag = 1;
} else if (all) {
if ((exclude_patterns.nr || patterns.nr) &&
!skip_prefix(ref->name, "refs/heads/", &path_to_match) &&
!skip_prefix(ref->name, "refs/remotes/", &path_to_match)) {
!skip_prefix(path, "refs/heads/", &path_to_match) &&
!skip_prefix(path, "refs/remotes/", &path_to_match)) {
/* Only accept reference of known type if there are match/exclude patterns */
return 0;
}
@ -208,10 +209,10 @@ static int get_name(const struct reference *ref, void *cb_data UNUSED)
}
/* Is it annotated? */
if (!reference_get_peeled_oid(the_repository, ref, &peeled)) {
is_annotated = !oideq(ref->oid, &peeled);
if (!peel_iterated_oid(the_repository, oid, &peeled)) {
is_annotated = !oideq(oid, &peeled);
} else {
oidcpy(&peeled, ref->oid);
oidcpy(&peeled, oid);
is_annotated = 0;
}
@ -228,8 +229,7 @@ static int get_name(const struct reference *ref, void *cb_data UNUSED)
else
prio = 0;
add_to_known_names(all ? ref->name + 5 : ref->name + 10,
&peeled, prio, ref->oid);
add_to_known_names(all ? path + 5 : path + 10, &peeled, prio, oid);
return 0;
}
@ -335,7 +335,7 @@ static void append_name(struct commit_name *n, struct strbuf *dst)
{
if (n->prio == 2 && !n->tag) {
n->tag = lookup_tag(the_repository, &n->oid);
if (!n->tag || parse_tag(the_repository, n->tag))
if (!n->tag || parse_tag(n->tag))
die(_("annotated tag %s not available"), n->path);
}
if (n->tag && !n->name_checked) {

View File

@ -65,7 +65,7 @@ static int parse_opt_sign_mode(const struct option *opt,
return 0;
if (parse_sign_mode(arg, val))
return error(_("unknown %s mode: %s"), opt->long_name, arg);
return error("Unknown %s mode: %s", opt->long_name, arg);
return 0;
}
@ -82,7 +82,7 @@ static int parse_opt_tag_of_filtered_mode(const struct option *opt,
else if (!strcmp(arg, "rewrite"))
*val = REWRITE;
else
return error(_("unknown tag-of-filtered mode: %s"), arg);
return error("Unknown tag-of-filtered mode: %s", arg);
return 0;
}
@ -107,7 +107,7 @@ static int parse_opt_reencode_mode(const struct option *opt,
if (!strcasecmp(arg, "abort"))
*val = REENCODE_ABORT;
else
return error(_("unknown reencoding mode: %s"), arg);
return error("Unknown reencoding mode: %s", arg);
}
return 0;
@ -318,16 +318,16 @@ static void export_blob(const struct object_id *oid)
} else {
buf = odb_read_object(the_repository->objects, oid, &type, &size);
if (!buf)
die(_("could not read blob %s"), oid_to_hex(oid));
die("could not read blob %s", oid_to_hex(oid));
if (check_object_signature(the_repository, oid, buf, size,
type) < 0)
die(_("oid mismatch in blob %s"), oid_to_hex(oid));
die("oid mismatch in blob %s", oid_to_hex(oid));
object = parse_object_buffer(the_repository, oid, type,
size, buf, &eaten);
}
if (!object)
die(_("could not read blob %s"), oid_to_hex(oid));
die("Could not read blob %s", oid_to_hex(oid));
mark_next_object(object);
@ -336,7 +336,7 @@ static void export_blob(const struct object_id *oid)
printf("original-oid %s\n", oid_to_hex(oid));
printf("data %"PRIuMAX"\n", (uintmax_t)size);
if (size && fwrite(buf, size, 1, stdout) != 1)
die_errno(_("could not write blob '%s'"), oid_to_hex(oid));
die_errno("could not write blob '%s'", oid_to_hex(oid));
printf("\n");
show_progress();
@ -499,10 +499,10 @@ static void show_filemodify(struct diff_queue_struct *q,
break;
default:
die(_("unexpected comparison status '%c' for %s, %s"),
q->queue[i]->status,
ospec->path ? ospec->path : _("none"),
spec->path ? spec->path : _("none"));
die("Unexpected comparison status '%c' for %s, %s",
q->queue[i]->status,
ospec->path ? ospec->path : "none",
spec->path ? spec->path : "none");
}
}
}
@ -699,14 +699,14 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
author = strstr(commit_buffer_cursor, "\nauthor ");
if (!author)
die(_("could not find author in commit %s"),
die("could not find author in commit %s",
oid_to_hex(&commit->object.oid));
author++;
commit_buffer_cursor = author_end = strchrnul(author, '\n');
committer = strstr(commit_buffer_cursor, "\ncommitter ");
if (!committer)
die(_("could not find committer in commit %s"),
die("could not find committer in commit %s",
oid_to_hex(&commit->object.oid));
committer++;
commit_buffer_cursor = committer_end = strchrnul(committer, '\n');
@ -781,8 +781,8 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
case REENCODE_NO:
break;
case REENCODE_ABORT:
die(_("encountered commit-specific encoding %.*s in commit "
"%s; use --reencode=[yes|no] to handle it"),
die("Encountered commit-specific encoding %.*s in commit "
"%s; use --reencode=[yes|no] to handle it",
(int)encoding_len, encoding,
oid_to_hex(&commit->object.oid));
}
@ -797,9 +797,12 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
(int)(committer_end - committer), committer);
if (signatures.nr) {
switch (signed_commit_mode) {
/* Exporting modes */
case SIGN_ABORT:
die("encountered signed commit %s; use "
"--signed-commits=<mode> to handle it",
oid_to_hex(&commit->object.oid));
case SIGN_WARN_VERBATIM:
warning(_("exporting %"PRIuMAX" signature(s) for commit %s"),
warning("exporting %"PRIuMAX" signature(s) for commit %s",
(uintmax_t)signatures.nr, oid_to_hex(&commit->object.oid));
/* fallthru */
case SIGN_VERBATIM:
@ -808,25 +811,12 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
print_signature(item->string, item->util);
}
break;
/* Stripping modes */
case SIGN_WARN_STRIP:
warning(_("stripping signature(s) from commit %s"),
warning("stripping signature(s) from commit %s",
oid_to_hex(&commit->object.oid));
/* fallthru */
case SIGN_STRIP:
break;
/* Aborting modes */
case SIGN_ABORT:
die(_("encountered signed commit %s; use "
"--signed-commits=<mode> to handle it"),
oid_to_hex(&commit->object.oid));
case SIGN_STRIP_IF_INVALID:
die(_("'strip-if-invalid' is not a valid mode for "
"git fast-export with --signed-commits=<mode>"));
default:
BUG("invalid signed_commit_mode value %d", signed_commit_mode);
}
string_list_clear(&signatures, 0);
}
@ -900,8 +890,7 @@ static void handle_tag(const char *name, struct tag *tag)
tagged = ((struct tag *)tagged)->tagged;
}
if (tagged->type == OBJ_TREE) {
warning(_("omitting tag %s,\nsince tags of trees (or tags "
"of tags of trees, etc.) are not supported."),
warning("Omitting tag %s,\nsince tags of trees (or tags of tags of trees, etc.) are not supported.",
oid_to_hex(&tag->object.oid));
return;
}
@ -909,7 +898,7 @@ static void handle_tag(const char *name, struct tag *tag)
buf = odb_read_object(the_repository->objects, &tag->object.oid,
&type, &size);
if (!buf)
die(_("could not read tag %s"), oid_to_hex(&tag->object.oid));
die("could not read tag %s", oid_to_hex(&tag->object.oid));
message = memmem(buf, size, "\n\n", 2);
if (message) {
message += 2;
@ -945,33 +934,23 @@ static void handle_tag(const char *name, struct tag *tag)
size_t sig_offset = parse_signed_buffer(message, message_size);
if (sig_offset < message_size)
switch (signed_tag_mode) {
/* Exporting modes */
case SIGN_ABORT:
die("encountered signed tag %s; use "
"--signed-tags=<mode> to handle it",
oid_to_hex(&tag->object.oid));
case SIGN_WARN_VERBATIM:
warning(_("exporting signed tag %s"),
warning("exporting signed tag %s",
oid_to_hex(&tag->object.oid));
/* fallthru */
case SIGN_VERBATIM:
break;
/* Stripping modes */
case SIGN_WARN_STRIP:
warning(_("stripping signature from tag %s"),
warning("stripping signature from tag %s",
oid_to_hex(&tag->object.oid));
/* fallthru */
case SIGN_STRIP:
message_size = sig_offset;
break;
/* Aborting modes */
case SIGN_ABORT:
die(_("encountered signed tag %s; use "
"--signed-tags=<mode> to handle it"),
oid_to_hex(&tag->object.oid));
case SIGN_STRIP_IF_INVALID:
die(_("'strip-if-invalid' is not a valid mode for "
"git fast-export with --signed-tags=<mode>"));
default:
BUG("invalid signed_commit_mode value %d", signed_commit_mode);
}
}
@ -981,8 +960,8 @@ static void handle_tag(const char *name, struct tag *tag)
if (!tagged_mark) {
switch (tag_of_filtered_mode) {
case TAG_FILTERING_ABORT:
die(_("tag %s tags unexported object; use "
"--tag-of-filtered-object=<mode> to handle it"),
die("tag %s tags unexported object; use "
"--tag-of-filtered-object=<mode> to handle it",
oid_to_hex(&tag->object.oid));
case DROP:
/* Ignore this tag altogether */
@ -990,7 +969,7 @@ static void handle_tag(const char *name, struct tag *tag)
return;
case REWRITE:
if (tagged->type == OBJ_TAG && !mark_tags) {
die(_("cannot export nested tags unless --mark-tags is specified."));
die(_("Error: Cannot export nested tags unless --mark-tags is specified."));
} else if (tagged->type == OBJ_COMMIT) {
p = rewrite_commit((struct commit *)tagged);
if (!p) {
@ -1046,7 +1025,7 @@ static struct commit *get_commit(struct rev_cmdline_entry *e, const char *full_n
tag = (struct tag *)tag->tagged;
}
if (!tag)
die(_("tag %s points nowhere?"), e->name);
die("Tag %s points nowhere?", e->name);
return (struct commit *)tag;
}
default:
@ -1084,7 +1063,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info)
commit = get_commit(e, full_name);
if (!commit) {
warning(_("%s: unexpected object of type %s, skipping."),
warning("%s: Unexpected object of type %s, skipping.",
e->name,
type_name(e->item->type));
free(full_name);
@ -1099,7 +1078,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info)
free(full_name);
continue;
default: /* OBJ_TAG (nested tags) is already handled */
warning(_("tag points to object of unexpected type %s, skipping."),
warning("Tag points to object of unexpected type %s, skipping.",
type_name(commit->object.type));
free(full_name);
continue;
@ -1195,7 +1174,7 @@ static void export_marks(char *file)
f = fopen_for_writing(file);
if (!f)
die_errno(_("unable to open marks file %s for writing."), file);
die_errno("Unable to open marks file %s for writing.", file);
for (i = 0; i < idnums.size; i++) {
if (deco->base && deco->base->type == 1) {
@ -1212,7 +1191,7 @@ static void export_marks(char *file)
e |= ferror(f);
e |= fclose(f);
if (e)
error(_("unable to write marks file %s."), file);
error("Unable to write marks file %s.", file);
}
static void import_marks(char *input_file, int check_exists)
@ -1235,20 +1214,20 @@ static void import_marks(char *input_file, int check_exists)
line_end = strchr(line, '\n');
if (line[0] != ':' || !line_end)
die(_("corrupt mark line: %s"), line);
die("corrupt mark line: %s", line);
*line_end = '\0';
mark = strtoumax(line + 1, &mark_end, 10);
if (!mark || mark_end == line + 1
|| *mark_end != ' ' || get_oid_hex(mark_end + 1, &oid))
die(_("corrupt mark line: %s"), line);
die("corrupt mark line: %s", line);
if (last_idnum < mark)
last_idnum = mark;
type = odb_read_object_info(the_repository->objects, &oid, NULL);
if (type < 0)
die(_("object not found: %s"), oid_to_hex(&oid));
die("object not found: %s", oid_to_hex(&oid));
if (type != OBJ_COMMIT)
/* only commits */
@ -1256,12 +1235,12 @@ static void import_marks(char *input_file, int check_exists)
commit = lookup_commit(the_repository, &oid);
if (!commit)
die(_("not a commit? can't happen: %s"), oid_to_hex(&oid));
die("not a commit? can't happen: %s", oid_to_hex(&oid));
object = &commit->object;
if (object->flags & SHOWN)
error(_("object %s already has a mark"), oid_to_hex(&oid));
error("Object %s already has a mark", oid_to_hex(&oid));
mark_object(object, mark);
@ -1415,7 +1394,7 @@ int cmd_fast_export(int argc,
get_tags_and_duplicates(&revs.cmdline);
if (prepare_revision_walk(&revs))
die(_("revision walk setup failed"));
die("revision walk setup failed");
revs.reverse = 1;
revs.diffopt.format_callback = show_filemodify;
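
The signed-commit and signed-tag hunks above both dispatch on a sign-handling mode, with the cases grouped into exporting, stripping and aborting behaviours and deliberate fallthroughs from the warning variants. A hedged, self-contained sketch of that switch shape; the enum values and handle_signature are illustrative names only, not the command's real interface.

    #include <stdio.h>
    #include <stdlib.h>

    enum sign_mode {
        SIGN_ABORT,
        SIGN_WARN_VERBATIM,
        SIGN_VERBATIM,
        SIGN_WARN_STRIP,
        SIGN_STRIP,
    };

    /* Decide how much of a signed payload to keep, per mode. */
    static size_t handle_signature(enum sign_mode mode, size_t payload_len,
                                   size_t full_len, const char *id)
    {
        switch (mode) {
        /* Exporting modes */
        case SIGN_WARN_VERBATIM:
            fprintf(stderr, "warning: exporting signature for %s\n", id);
            /* fallthrough */
        case SIGN_VERBATIM:
            return full_len;        /* keep the signed payload as-is */
        /* Stripping modes */
        case SIGN_WARN_STRIP:
            fprintf(stderr, "warning: stripping signature from %s\n", id);
            /* fallthrough */
        case SIGN_STRIP:
            return payload_len;     /* truncate to the unsigned payload */
        /* Aborting modes */
        case SIGN_ABORT:
            fprintf(stderr, "fatal: encountered signed object %s\n", id);
            exit(128);
        }
        return full_len;            /* unreachable with a valid mode */
    }

    int main(void)
    {
        printf("%zu\n", handle_signature(SIGN_WARN_STRIP, 100, 180, "deadbeef"));
        return 0;
    }

Grouping the cases this way keeps each warning variant directly above the silent variant it falls through to, which is the readability point the reordered switch seems to be after.
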

File diff suppressed because it is too large

View File

@ -47,7 +47,7 @@
static const char * const builtin_fetch_usage[] = {
N_("git fetch [<options>] [<repository> [<refspec>...]]"),
N_("git fetch [<options>] <group>"),
N_("git fetch --multiple [<options>] [(<repository>|<group>)...]"),
N_("git fetch --multiple [<options>] [(<repository> | <group>)...]"),
N_("git fetch --all [<options>]"),
NULL
};
@ -289,11 +289,13 @@ static struct refname_hash_entry *refname_hash_add(struct hashmap *map,
return ent;
}
static int add_one_refname(const struct reference *ref, void *cbdata)
static int add_one_refname(const char *refname, const char *referent UNUSED,
const struct object_id *oid,
int flag UNUSED, void *cbdata)
{
struct hashmap *refname_map = cbdata;
(void) refname_hash_add(refname_map, ref->name, ref->oid);
(void) refname_hash_add(refname_map, refname, oid);
return 0;
}
@ -1414,11 +1416,14 @@ static void set_option(struct transport *transport, const char *name, const char
}
static int add_oid(const struct reference *ref, void *cb_data)
static int add_oid(const char *refname UNUSED,
const char *referent UNUSED,
const struct object_id *oid,
int flags UNUSED, void *cb_data)
{
struct oid_array *oids = cb_data;
oid_array_append(oids, ref->oid);
oid_array_append(oids, oid);
return 0;
}
@ -1681,36 +1686,6 @@ static void ref_transaction_rejection_handler(const char *refname,
*data->retcode = 1;
}
/*
* Commit the reference transaction. If it isn't an atomic transaction, handle
* rejected updates as part of using batched updates.
*/
static int commit_ref_transaction(struct ref_transaction **transaction,
bool is_atomic, const char *remote_name,
struct strbuf *err)
{
int retcode = ref_transaction_commit(*transaction, err);
if (retcode)
goto out;
if (!is_atomic) {
struct ref_rejection_data data = {
.conflict_msg_shown = 0,
.remote_name = remote_name,
.retcode = &retcode,
};
ref_transaction_for_each_rejected_update(*transaction,
ref_transaction_rejection_handler,
&data);
}
out:
ref_transaction_free(*transaction);
*transaction = NULL;
return retcode;
}
static int do_fetch(struct transport *transport,
struct refspec *rs,
const struct fetch_config *config)
@ -1883,14 +1858,33 @@ static int do_fetch(struct transport *transport,
if (retcode)
goto cleanup;
retcode = commit_ref_transaction(&transaction, atomic_fetch,
transport->remote->name, &err);
/*
* With '--atomic', bail out if the transaction fails. Without '--atomic',
* continue to fetch head and perform other post-fetch operations.
*/
if (retcode && atomic_fetch)
retcode = ref_transaction_commit(transaction, &err);
if (retcode) {
/*
* Explicitly handle transaction cleanup to avoid
* aborting an already closed transaction.
*/
ref_transaction_free(transaction);
transaction = NULL;
goto cleanup;
}
if (!atomic_fetch) {
struct ref_rejection_data data = {
.retcode = &retcode,
.conflict_msg_shown = 0,
.remote_name = transport->remote->name,
};
ref_transaction_for_each_rejected_update(transaction,
ref_transaction_rejection_handler,
&data);
if (retcode) {
ref_transaction_free(transaction);
transaction = NULL;
goto cleanup;
}
}
commit_fetch_head(&fetch_head);
@ -1956,14 +1950,6 @@ static int do_fetch(struct transport *transport,
}
cleanup:
/*
* When using batched updates, we want to commit the non-rejected
* updates and also handle the rejections.
*/
if (retcode && !atomic_fetch && transaction)
commit_ref_transaction(&transaction, false,
transport->remote->name, &err);
if (retcode) {
if (err.len) {
error("%s", err.buf);

View File

@ -13,11 +13,11 @@
#include "fsck.h"
#include "parse-options.h"
#include "progress.h"
#include "streaming.h"
#include "packfile.h"
#include "object-file.h"
#include "object-name.h"
#include "odb.h"
#include "odb/streaming.h"
#include "path.h"
#include "read-cache-ll.h"
#include "replace-object.h"
@ -340,8 +340,7 @@ static void check_unreachable_object(struct object *obj)
}
f = xfopen(filename, "w");
if (obj->type == OBJ_BLOB) {
if (odb_stream_blob_to_fd(the_repository->objects, fileno(f),
&obj->oid, NULL, 1))
if (stream_blob_to_fd(fileno(f), &obj->oid, NULL, 1))
die_errno(_("could not write '%s'"), filename);
} else
fprintf(f, "%s\n", describe_object(&obj->oid));
@ -531,13 +530,14 @@ static int fsck_handle_reflog(const char *logname, void *cb_data)
return 0;
}
static int fsck_handle_ref(const struct reference *ref, void *cb_data UNUSED)
static int fsck_handle_ref(const char *refname, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
struct object *obj;
obj = parse_object(the_repository, ref->oid);
obj = parse_object(the_repository, oid);
if (!obj) {
if (is_promisor_object(the_repository, ref->oid)) {
if (is_promisor_object(the_repository, oid)) {
/*
* Increment default_refs anyway, because this is a
* valid ref.
@ -546,19 +546,19 @@ static int fsck_handle_ref(const struct reference *ref, void *cb_data UNUSED)
return 0;
}
error(_("%s: invalid sha1 pointer %s"),
ref->name, oid_to_hex(ref->oid));
refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
/* We'll continue with the rest despite the error.. */
return 0;
}
if (obj->type != OBJ_COMMIT && is_branch(ref->name)) {
error(_("%s: not a commit"), ref->name);
if (obj->type != OBJ_COMMIT && is_branch(refname)) {
error(_("%s: not a commit"), refname);
errors_found |= ERROR_REFS;
}
default_refs++;
obj->flags |= USED;
fsck_put_object_name(&fsck_walk_options,
ref->oid, "%s", ref->name);
oid, "%s", refname);
mark_object_reachable(obj);
return 0;
@ -580,19 +580,13 @@ static void get_default_heads(void)
worktrees = get_worktrees();
for (p = worktrees; *p; p++) {
struct worktree *wt = *p;
struct strbuf refname = STRBUF_INIT;
struct strbuf ref = STRBUF_INIT;
strbuf_worktree_ref(wt, &refname, "HEAD");
fsck_head_link(refname.buf, &head_points_at, &head_oid);
if (head_points_at && !is_null_oid(&head_oid)) {
struct reference ref = {
.name = refname.buf,
.oid = &head_oid,
};
fsck_handle_ref(&ref, NULL);
}
strbuf_release(&refname);
strbuf_worktree_ref(wt, &ref, "HEAD");
fsck_head_link(ref.buf, &head_points_at, &head_oid);
if (head_points_at && !is_null_oid(&head_oid))
fsck_handle_ref(ref.buf, NULL, &head_oid, 0, NULL);
strbuf_release(&ref);
if (include_reflogs)
refs_for_each_reflog(get_worktree_ref_store(wt),
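
One side of the fsck hunk wraps a loose name/oid pair for the worktree HEAD into a struct so the same per-ref callback can serve both ref iteration and ad-hoc call sites. A tiny sketch of that wrapping pattern with invented types:

    #include <stdio.h>

    struct oid { unsigned char hash[20]; };
    struct reference { const char *name; const struct oid *oid; };

    /* Single callback used both by ref iteration and ad-hoc call sites. */
    static int handle_ref(const struct reference *ref, void *cb_data)
    {
        (void)cb_data;
        printf("checking %s\n", ref->name);
        return 0;
    }

    int main(void)
    {
        struct oid head_oid = { { 0 } };
        /* Ad-hoc call site: wrap the loose name/oid pair into the struct
         * the callback expects, as the worktree-HEAD path does above. */
        struct reference head = { .name = "HEAD", .oid = &head_oid };
        return handle_ref(&head, NULL);
    }
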

View File

@ -36,7 +36,6 @@
#include "reflog.h"
#include "repack.h"
#include "rerere.h"
#include "revision.h"
#include "blob.h"
#include "tree.h"
#include "promisor-remote.h"
@ -287,26 +286,12 @@ static void maintenance_run_opts_release(struct maintenance_run_opts *opts)
static int pack_refs_condition(UNUSED struct gc_config *cfg)
{
struct string_list included_refs = STRING_LIST_INIT_NODUP;
struct ref_exclusions excludes = REF_EXCLUSIONS_INIT;
struct refs_optimize_opts optimize_opts = {
.exclusions = &excludes,
.includes = &included_refs,
.flags = REFS_OPTIMIZE_PRUNE | REFS_OPTIMIZE_AUTO,
};
bool required;
/* Check for all refs, similar to 'git refs optimize --all'. */
string_list_append(optimize_opts.includes, "*");
if (refs_optimize_required(get_main_ref_store(the_repository),
&optimize_opts, &required))
return 0;
clear_ref_exclusions(&excludes);
string_list_clear(&included_refs, 0);
return required;
/*
* The auto-repacking logic for refs is handled by the ref backends and
* exposed via `git pack-refs --auto`. We thus always return truish
* here and let the backend decide for us.
*/
return 1;
}
static int maintenance_task_pack_refs(struct maintenance_run_opts *opts,
@ -1063,7 +1048,7 @@ int cmd_gc(int argc,
report_garbage = report_pack_garbage;
odb_reprepare(the_repository->objects);
if (pack_garbage.nr > 0) {
odb_close(the_repository->objects);
close_object_store(the_repository->objects);
clean_pack_garbage();
}
@ -1110,26 +1095,32 @@ static int maintenance_opt_schedule(const struct option *opt, const char *arg,
return 0;
}
/* Remember to update object flag allocation in object.h */
#define SEEN (1u<<0)
struct cg_auto_data {
int num_not_in_graph;
int limit;
};
static int dfs_on_ref(const struct reference *ref, void *cb_data)
static int dfs_on_ref(const char *refname UNUSED,
const char *referent UNUSED,
const struct object_id *oid,
int flags UNUSED,
void *cb_data)
{
struct cg_auto_data *data = (struct cg_auto_data *)cb_data;
int result = 0;
const struct object_id *maybe_peeled = ref->oid;
struct object_id peeled;
struct commit_list *stack = NULL;
struct commit *commit;
if (!reference_get_peeled_oid(the_repository, ref, &peeled))
maybe_peeled = &peeled;
if (odb_read_object_info(the_repository->objects, maybe_peeled, NULL) != OBJ_COMMIT)
if (!peel_iterated_oid(the_repository, oid, &peeled))
oid = &peeled;
if (odb_read_object_info(the_repository->objects, oid, NULL) != OBJ_COMMIT)
return 0;
commit = lookup_commit(the_repository, maybe_peeled);
commit = lookup_commit(the_repository, oid);
if (!commit)
return 0;
if (repo_parse_commit(the_repository, commit) ||
@ -3456,67 +3447,7 @@ static int maintenance_stop(int argc, const char **argv, const char *prefix,
return update_background_schedule(NULL, 0);
}
static const char *const builtin_maintenance_is_needed_usage[] = {
"git maintenance is-needed [--task=<task>] [--schedule]",
NULL
};
static int maintenance_is_needed(int argc, const char **argv, const char *prefix,
struct repository *repo UNUSED)
{
struct maintenance_run_opts opts = MAINTENANCE_RUN_OPTS_INIT;
struct string_list selected_tasks = STRING_LIST_INIT_DUP;
struct gc_config cfg = GC_CONFIG_INIT;
struct option options[] = {
OPT_BOOL(0, "auto", &opts.auto_flag,
N_("run tasks based on the state of the repository")),
OPT_CALLBACK_F(0, "task", &selected_tasks, N_("task"),
N_("check a specific task"),
PARSE_OPT_NONEG, task_option_parse),
OPT_END()
};
bool is_needed = false;
argc = parse_options(argc, argv, prefix, options,
builtin_maintenance_is_needed_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (argc)
usage_with_options(builtin_maintenance_is_needed_usage, options);
gc_config(&cfg);
initialize_task_config(&opts, &selected_tasks);
if (opts.auto_flag) {
for (size_t i = 0; i < opts.tasks_nr; i++) {
if (tasks[opts.tasks[i]].auto_condition &&
tasks[opts.tasks[i]].auto_condition(&cfg)) {
is_needed = true;
break;
}
}
} else {
/*
* When not using --auto we always require maintenance right now.
*
* TODO: this certainly is too eager, as some maintenance tasks may
* decide to not do anything because the data structures are already
* fully optimized. We may eventually want to extend the auto
* condition to also cover non-auto runs so that we can detect such
* cases.
*/
is_needed = true;
}
string_list_clear(&selected_tasks, 0);
maintenance_run_opts_release(&opts);
gc_config_release(&cfg);
if (is_needed)
return 0;
return 1;
}
static const char *const builtin_maintenance_usage[] = {
static const char * const builtin_maintenance_usage[] = {
N_("git maintenance <subcommand> [<options>]"),
NULL,
};
@ -3533,7 +3464,6 @@ int cmd_maintenance(int argc,
OPT_SUBCOMMAND("stop", &fn, maintenance_stop),
OPT_SUBCOMMAND("register", &fn, maintenance_register),
OPT_SUBCOMMAND("unregister", &fn, maintenance_unregister),
OPT_SUBCOMMAND("is-needed", &fn, maintenance_is_needed),
OPT_END(),
};
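
The maintenance is-needed code above reports that maintenance is required as soon as any selected task's auto-condition fires, and is unconditionally eager when --auto is not given. A self-contained sketch of that decision, with an invented struct task and condition functions:

    #include <stdbool.h>
    #include <stdio.h>

    struct task {
        const char *name;
        bool (*auto_condition)(void);   /* NULL means "no auto heuristic" */
    };

    static bool always(void) { return true; }
    static bool never(void)  { return false; }

    /* With --auto, maintenance is needed iff any task's heuristic fires;
     * without it, the sketch (like the code above) is unconditionally eager. */
    static bool maintenance_needed(const struct task *tasks, int nr, bool auto_flag)
    {
        if (!auto_flag)
            return true;
        for (int i = 0; i < nr; i++)
            if (tasks[i].auto_condition && tasks[i].auto_condition())
                return true;
        return false;
    }

    int main(void)
    {
        struct task tasks[] = {
            { "pack-refs", always },
            { "commit-graph", never },
            { "gc", NULL },
        };
        printf("needed: %d\n", maintenance_needed(tasks, 3, true));
        return 0;
    }
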

View File

@ -43,12 +43,6 @@ static int run(int argc, const char **argv, const char *prefix,
if (!argc)
goto usage;
/*
* All current "hook run" use-cases require ungrouped child output.
* If this changes, a hook run argument can be added to toggle it.
*/
opt.ungroup = 1;
/*
* Having a -- for "run" when providing <hook-args> is
* mandatory.

View File

@ -16,12 +16,12 @@
#include "progress.h"
#include "fsck.h"
#include "strbuf.h"
#include "streaming.h"
#include "thread-utils.h"
#include "packfile.h"
#include "pack-revindex.h"
#include "object-file.h"
#include "odb.h"
#include "odb/streaming.h"
#include "oid-array.h"
#include "oidset.h"
#include "path.h"
@ -762,7 +762,7 @@ static void find_ref_delta_children(const struct object_id *oid,
struct compare_data {
struct object_entry *entry;
struct odb_read_stream *st;
struct git_istream *st;
unsigned char *buf;
unsigned long buf_size;
};
@ -779,7 +779,7 @@ static int compare_objects(const unsigned char *buf, unsigned long size,
}
while (size) {
ssize_t len = odb_read_stream_read(data->st, data->buf, size);
ssize_t len = read_istream(data->st, data->buf, size);
if (len == 0)
die(_("SHA1 COLLISION FOUND WITH %s !"),
oid_to_hex(&data->entry->idx.oid));
@ -798,6 +798,8 @@ static int compare_objects(const unsigned char *buf, unsigned long size,
static int check_collison(struct object_entry *entry)
{
struct compare_data data;
enum object_type type;
unsigned long size;
if (entry->size <= repo_settings_get_big_file_threshold(the_repository) ||
entry->type != OBJ_BLOB)
@ -805,14 +807,15 @@ static int check_collison(struct object_entry *entry)
memset(&data, 0, sizeof(data));
data.entry = entry;
data.st = odb_read_stream_open(the_repository->objects, &entry->idx.oid, NULL);
data.st = open_istream(the_repository, &entry->idx.oid, &type, &size,
NULL);
if (!data.st)
return -1;
if (data.st->size != entry->size || data.st->type != entry->type)
if (size != entry->size || type != entry->type)
die(_("SHA1 COLLISION FOUND WITH %s !"),
oid_to_hex(&entry->idx.oid));
unpack_data(entry, compare_objects, &data);
odb_read_stream_close(data.st);
close_istream(data.st);
free(data.buf);
return 0;
}
@ -1637,7 +1640,7 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
rename_tmp_packfile(&final_index_name, curr_index_name, &index_name,
hash, "idx", 1);
if (do_fsck_object && startup_info->have_repository)
if (do_fsck_object)
packfile_store_load_pack(the_repository->objects->packfiles,
final_index_name, 0);
@ -2107,23 +2110,8 @@ int cmd_index_pack(int argc,
else
close(input_fd);
if (do_fsck_object) {
/*
* We cannot perform queued consistency checks when running
* outside of a repository because those require us to read
* from the object database, which is uninitialized.
*
* TODO: we may eventually set up an in-memory object database,
* which would allow us to perform these queued checks.
*/
if (!startup_info->have_repository &&
fsck_has_queued_checks(&fsck_options))
die(_("cannot perform queued object checks outside "
"of a repository"));
if (fsck_finish(&fsck_options))
die(_("fsck error in pack objects"));
}
if (do_fsck_object && fsck_finish(&fsck_options))
die(_("fsck error in pack objects"));
free(opts.anomaly);
free(objects);
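
The collision check above streams the already-stored object and compares it chunk by chunk against the incoming data rather than loading both blobs whole. A self-contained sketch of that chunked comparison; struct stream and stream_read are stand-ins for the streaming reader, not the real odb interface.

    #include <stdio.h>
    #include <string.h>

    /* Minimal stand-in for a streaming reader over an existing object. */
    struct stream { const unsigned char *data; size_t size, pos; };

    static size_t stream_read(struct stream *st, unsigned char *buf, size_t len)
    {
        size_t n = st->size - st->pos;
        if (n > len)
            n = len;
        memcpy(buf, st->data + st->pos, n);
        st->pos += n;
        return n;
    }

    /* Compare incoming data against the streamed object without ever
     * holding the whole existing object in memory at once. */
    static int streams_match(struct stream *st, const unsigned char *incoming,
                             size_t size)
    {
        unsigned char buf[64];

        while (size) {
            size_t want = size < sizeof(buf) ? size : sizeof(buf);
            size_t got = stream_read(st, buf, want);

            if (!got || memcmp(buf, incoming, got))
                return 0;       /* short read or byte mismatch: collision */
            incoming += got;
            size -= got;
        }
        return 1;
    }

    int main(void)
    {
        const unsigned char data[] = "the same bytes on both sides";
        struct stream st = { data, sizeof(data), 0 };

        printf("match: %d\n", streams_match(&st, data, sizeof(data)));
        return 0;
    }
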

View File

@ -2,32 +2,26 @@
#include "bloom.h"
#include "builtin.h"
#include "commit-graph.h"
#include "commit-slab.h"
#include "commit.h"
#include "config.h"
#include "environment.h"
#include "diff.h"
#include "diffcore.h"
#include "environment.h"
#include "ewah/ewok.h"
#include "hashmap.h"
#include "hex.h"
#include "log-tree.h"
#include "object-name.h"
#include "object.h"
#include "parse-options.h"
#include "prio-queue.h"
#include "quote.h"
#include "repository.h"
#include "revision.h"
/* Remember to update object flag allocation in object.h */
#define PARENT1 (1u<<16) /* used instead of SEEN */
#define PARENT2 (1u<<17) /* used instead of BOTTOM, BOUNDARY */
struct last_modified_entry {
struct hashmap_entry hashent;
struct object_id oid;
struct bloom_key key;
size_t diff_idx;
const char path[FLEX_ARRAY];
};
@ -43,45 +37,13 @@ static int last_modified_entry_hashcmp(const void *unused UNUSED,
return strcmp(ent1->path, path ? path : ent2->path);
}
/*
* Hold a bitmap for each commit we're working with. In the bitmap, each bit
* represents a path in `lm->all_paths`. An active bit indicates the path still
* needs to be associated to a commit.
*/
define_commit_slab(active_paths_for_commit, struct bitmap *);
struct last_modified {
struct hashmap paths;
struct rev_info rev;
bool recursive;
bool show_trees;
const char **all_paths;
size_t all_paths_nr;
struct active_paths_for_commit active_paths;
/* 'scratch' to avoid allocating a bitmap every process_parent() */
struct bitmap *scratch;
};
static struct bitmap *active_paths_for(struct last_modified *lm, struct commit *c)
{
struct bitmap **bitmap = active_paths_for_commit_at(&lm->active_paths, c);
if (!*bitmap)
*bitmap = bitmap_word_alloc(lm->all_paths_nr / BITS_IN_EWORD + 1);
return *bitmap;
}
static void active_paths_free(struct last_modified *lm, struct commit *c)
{
struct bitmap **bitmap = active_paths_for_commit_at(&lm->active_paths, c);
if (*bitmap) {
bitmap_free(*bitmap);
*bitmap = NULL;
}
}
static void last_modified_release(struct last_modified *lm)
{
struct hashmap_iter iter;
@ -92,8 +54,6 @@ static void last_modified_release(struct last_modified *lm)
hashmap_clear_and_free(&lm->paths, struct last_modified_entry, hashent);
release_revisions(&lm->rev);
free(lm->all_paths);
}
struct last_modified_callback_data {
@ -186,7 +146,7 @@ static void mark_path(const char *path, const struct object_id *oid,
* Is it arriving at a version of interest, or is it from a side branch
* which did not contribute to the final state?
*/
if (oid && !oideq(oid, &ent->oid))
if (!oideq(oid, &ent->oid))
return;
last_modified_emit(data->lm, path, data->commit);
@ -236,17 +196,7 @@ static void last_modified_diff(struct diff_queue_struct *q,
}
}
static void pass_to_parent(struct bitmap *c,
struct bitmap *p,
size_t pos)
{
bitmap_unset(c, pos);
bitmap_set(p, pos);
}
static bool maybe_changed_path(struct last_modified *lm,
struct commit *origin,
struct bitmap *active)
static bool maybe_changed_path(struct last_modified *lm, struct commit *origin)
{
struct bloom_filter *filter;
struct last_modified_entry *ent;
@ -263,9 +213,6 @@ static bool maybe_changed_path(struct last_modified *lm,
return true;
hashmap_for_each_entry(&lm->paths, &iter, ent, hashent) {
if (active && !bitmap_get(active, ent->diff_idx))
continue;
if (bloom_filter_contains(filter, &ent->key,
lm->rev.bloom_filter_settings))
return true;
@ -273,202 +220,42 @@ static bool maybe_changed_path(struct last_modified *lm,
return false;
}
static void process_parent(struct last_modified *lm,
struct prio_queue *queue,
struct commit *c, struct bitmap *active_c,
struct commit *parent, int parent_i)
{
struct bitmap *active_p;
repo_parse_commit(lm->rev.repo, parent);
active_p = active_paths_for(lm, parent);
/*
* The first time entering this function for this commit (i.e. first parent)
* see if Bloom filters will tell us it's worth to do the diff.
*/
if (parent_i || maybe_changed_path(lm, c, active_c)) {
diff_tree_oid(&parent->object.oid,
&c->object.oid, "", &lm->rev.diffopt);
diffcore_std(&lm->rev.diffopt);
}
/*
* Test each path for TREESAME-ness against the parent. If a path is
* TREESAME, pass it on to this parent.
*
* First, collect all paths that are *not* TREESAME in 'scratch'.
* Then, pass paths that *are* TREESAME and active to the parent.
*/
for (int i = 0; i < diff_queued_diff.nr; i++) {
struct diff_filepair *fp = diff_queued_diff.queue[i];
const char *path = fp->two->path;
struct last_modified_entry *ent =
hashmap_get_entry_from_hash(&lm->paths, strhash(path), path,
struct last_modified_entry, hashent);
if (ent) {
size_t k = ent->diff_idx;
if (bitmap_get(active_c, k))
bitmap_set(lm->scratch, k);
}
}
for (size_t i = 0; i < lm->all_paths_nr; i++) {
if (bitmap_get(active_c, i) && !bitmap_get(lm->scratch, i))
pass_to_parent(active_c, active_p, i);
}
/*
* If parent has any active paths, put it on the queue (if not already).
*/
if (!bitmap_is_empty(active_p) && !(parent->object.flags & PARENT1)) {
parent->object.flags |= PARENT1;
prio_queue_put(queue, parent);
}
if (!(parent->object.flags & PARENT1))
active_paths_free(lm, parent);
MEMZERO_ARRAY(lm->scratch->words, lm->scratch->word_alloc);
diff_queue_clear(&diff_queued_diff);
}
static int last_modified_run(struct last_modified *lm)
{
int max_count, queue_popped = 0;
struct prio_queue queue = { compare_commits_by_gen_then_commit_date };
struct prio_queue not_queue = { compare_commits_by_gen_then_commit_date };
struct commit_list *list;
struct last_modified_callback_data data = { .lm = lm };
lm->rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
lm->rev.diffopt.format_callback = last_modified_diff;
lm->rev.diffopt.format_callback_data = &data;
lm->rev.no_walk = 1;
prepare_revision_walk(&lm->rev);
max_count = lm->rev.max_count;
while (hashmap_get_size(&lm->paths)) {
data.commit = get_revision(&lm->rev);
if (!data.commit)
BUG("paths remaining beyond boundary in last-modified");
init_active_paths_for_commit(&lm->active_paths);
lm->scratch = bitmap_word_alloc(lm->all_paths_nr);
/*
* lm->rev.commits holds the set of boundary commits for our walk.
*
* Loop through each such commit, and place it in the appropriate queue.
*/
for (list = lm->rev.commits; list; list = list->next) {
struct commit *c = list->item;
if (c->object.flags & BOTTOM) {
prio_queue_put(&not_queue, c);
c->object.flags |= PARENT2;
} else if (!(c->object.flags & PARENT1)) {
/*
* If the commit is a starting point (and hasn't been
* seen yet), then initialize the set of interesting
* paths, too.
*/
struct bitmap *active;
prio_queue_put(&queue, c);
c->object.flags |= PARENT1;
active = active_paths_for(lm, c);
for (size_t i = 0; i < lm->all_paths_nr; i++)
bitmap_set(active, i);
}
}
while (queue.nr) {
int parent_i;
struct commit_list *p;
struct commit *c = prio_queue_get(&queue);
struct bitmap *active_c = active_paths_for(lm, c);
if ((0 <= max_count && max_count < ++queue_popped) ||
(c->object.flags & PARENT2)) {
/*
* Either a boundary commit, or we have already seen too
* many others. Either way, stop here.
*/
c->object.flags |= PARENT2 | BOUNDARY;
data.commit = c;
if (data.commit->object.flags & BOUNDARY) {
diff_tree_oid(lm->rev.repo->hash_algo->empty_tree,
&c->object.oid,
"", &lm->rev.diffopt);
&data.commit->object.oid, "",
&lm->rev.diffopt);
diff_flush(&lm->rev.diffopt);
goto cleanup;
break;
}
/*
* Otherwise, make sure that 'c' isn't reachable from anything
* in the '--not' queue.
*/
repo_parse_commit(lm->rev.repo, c);
if (!maybe_changed_path(lm, data.commit))
continue;
while (not_queue.nr) {
struct commit_list *np;
struct commit *n = prio_queue_get(&not_queue);
repo_parse_commit(lm->rev.repo, n);
for (np = n->parents; np; np = np->next) {
if (!(np->item->object.flags & PARENT2)) {
prio_queue_put(&not_queue, np->item);
np->item->object.flags |= PARENT2;
}
}
if (commit_graph_generation(n) < commit_graph_generation(c))
break;
}
/*
* Look at each parent and pass on each path that's TREESAME
* with that parent. Stop early when no active paths remain.
*/
for (p = c->parents, parent_i = 0; p; p = p->next, parent_i++) {
process_parent(lm, &queue,
c, active_c,
p->item, parent_i);
if (bitmap_is_empty(active_c))
break;
}
/*
* Paths that remain active, or not TREESAME with any parent,
* were changed by 'c'.
*/
if (!bitmap_is_empty(active_c)) {
data.commit = c;
for (size_t i = 0; i < lm->all_paths_nr; i++) {
if (bitmap_get(active_c, i))
mark_path(lm->all_paths[i], NULL, &data);
}
}
cleanup:
active_paths_free(lm, c);
log_tree_commit(&lm->rev, data.commit);
}
if (hashmap_get_size(&lm->paths))
BUG("paths remaining beyond boundary in last-modified");
clear_prio_queue(&not_queue);
clear_prio_queue(&queue);
clear_active_paths_for_commit(&lm->active_paths);
bitmap_free(lm->scratch);
return 0;
}
static int last_modified_init(struct last_modified *lm, struct repository *r,
const char *prefix, int argc, const char **argv)
{
struct hashmap_iter iter;
struct last_modified_entry *ent;
hashmap_init(&lm->paths, last_modified_entry_hashcmp, NULL, 0);
repo_init_revisions(r, &lm->rev, prefix);
@ -493,13 +280,6 @@ static int last_modified_init(struct last_modified *lm, struct repository *r,
if (populate_paths_from_revs(lm) < 0)
return error(_("unable to setup last-modified"));
CALLOC_ARRAY(lm->all_paths, hashmap_get_size(&lm->paths));
lm->all_paths_nr = 0;
hashmap_for_each_entry(&lm->paths, &iter, ent, hashent) {
ent->diff_idx = lm->all_paths_nr++;
lm->all_paths[ent->diff_idx] = ent->path;
}
return 0;
}
@ -525,8 +305,7 @@ int cmd_last_modified(int argc, const char **argv, const char *prefix,
argc = parse_options(argc, argv, prefix, last_modified_options,
last_modified_usage,
PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT |
PARSE_OPT_KEEP_DASHDASH);
PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT);
repo_config(repo, git_default_config, NULL);
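
The last-modified walk above keeps one bit per path in a bitmap attached to each commit; a path that is TREESAME with a parent has its bit cleared in the child and set in the parent, and whatever stays active at a commit is attributed to it. A minimal sketch of that pass_to_parent bit movement on plain machine words (a single unsigned long stands in for the per-commit bitmap):

    #include <stdio.h>

    /* One word is enough for this sketch: bit i == "path i still unattributed". */
    static void pass_to_parent(unsigned long *child, unsigned long *parent, int bit)
    {
        *child  &= ~(1UL << bit);   /* path no longer pending at the child */
        *parent |=  (1UL << bit);   /* ...the parent must now explain it */
    }

    int main(void)
    {
        unsigned long child = 0x7;  /* paths 0,1,2 pending at this commit */
        unsigned long parent = 0;

        /* Paths 0 and 2 are TREESAME with the parent, so they move on;
         * path 1 stays behind and is attributed to the child commit. */
        pass_to_parent(&child, &parent, 0);
        pass_to_parent(&child, &parent, 2);

        printf("attributed to child: %#lx, pending at parent: %#lx\n",
               child, parent);
        return 0;
    }

Once a commit's word reaches zero there is nothing left for its ancestors to explain along that edge, which is why the walk above can stop looking at further parents early.
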

View File

@ -16,7 +16,6 @@
#include "refs.h"
#include "object-name.h"
#include "odb.h"
#include "odb/streaming.h"
#include "pager.h"
#include "color.h"
#include "commit.h"
@ -36,6 +35,7 @@
#include "parse-options.h"
#include "line-log.h"
#include "branch.h"
#include "streaming.h"
#include "version.h"
#include "mailmap.h"
#include "progress.h"
@ -584,7 +584,7 @@ static int show_blob_object(const struct object_id *oid, struct rev_info *rev, c
fflush(rev->diffopt.file);
if (!rev->diffopt.flags.textconv_set_via_cmdline ||
!rev->diffopt.flags.allow_textconv)
return odb_stream_blob_to_fd(the_repository->objects, 1, oid, NULL, 0);
return stream_blob_to_fd(1, oid, NULL, 0);
if (get_oid_with_context(the_repository, obj_name,
GET_OID_RECORD_PATH,
@ -594,7 +594,7 @@ static int show_blob_object(const struct object_id *oid, struct rev_info *rev, c
!textconv_object(the_repository, obj_context.path,
obj_context.mode, &oidc, 1, &buf, &size)) {
object_context_release(&obj_context);
return odb_stream_blob_to_fd(the_repository->objects, 1, oid, NULL, 0);
return stream_blob_to_fd(1, oid, NULL, 0);
}
if (!buf)
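
show_blob_object above streams the blob straight to the output descriptor when no textconv applies, so a large blob never has to be read into memory in one piece. A self-contained sketch of such fixed-chunk streaming between file descriptors (a generic copy loop, not git's streaming API):

    #include <unistd.h>

    /* Copy everything from in_fd to out_fd in fixed-size chunks, so even a
     * very large blob never has to be held in memory as a single buffer. */
    static int stream_fd(int in_fd, int out_fd)
    {
        char buf[8192];
        ssize_t n;

        while ((n = read(in_fd, buf, sizeof(buf))) > 0) {
            ssize_t off = 0;
            while (off < n) {
                ssize_t w = write(out_fd, buf + off, n - off);
                if (w < 0)
                    return -1;
                off += w;
            }
        }
        return n < 0 ? -1 : 0;
    }

    int main(void)
    {
        return stream_fd(0, 1) ? 1 : 0;
    }
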

View File

@ -156,7 +156,7 @@ int cmd_ls_remote(int argc,
continue;
if (!tail_match(&pattern, ref->name))
continue;
item = ref_array_push(&ref_array, ref->name, &ref->old_oid, NULL);
item = ref_array_push(&ref_array, ref->name, &ref->old_oid);
item->symref = xstrdup_or_null(ref->symref);
}

View File

@ -339,9 +339,10 @@ static int cmp_by_tag_and_age(const void *a_, const void *b_)
return a->taggerdate != b->taggerdate;
}
static int name_ref(const struct reference *ref, void *cb_data)
static int name_ref(const char *path, const char *referent UNUSED, const struct object_id *oid,
int flags UNUSED, void *cb_data)
{
struct object *o = parse_object(the_repository, ref->oid);
struct object *o = parse_object(the_repository, oid);
struct name_ref_data *data = cb_data;
int can_abbreviate_output = data->tags_only && data->name_only;
int deref = 0;
@ -349,14 +350,14 @@ static int name_ref(const struct reference *ref, void *cb_data)
struct commit *commit = NULL;
timestamp_t taggerdate = TIME_MAX;
if (data->tags_only && !starts_with(ref->name, "refs/tags/"))
if (data->tags_only && !starts_with(path, "refs/tags/"))
return 0;
if (data->exclude_filters.nr) {
struct string_list_item *item;
for_each_string_list_item(item, &data->exclude_filters) {
if (subpath_matches(ref->name, item->string) >= 0)
if (subpath_matches(path, item->string) >= 0)
return 0;
}
}
@ -377,7 +378,7 @@ static int name_ref(const struct reference *ref, void *cb_data)
* shouldn't stop when seeing 'refs/tags/v1.4' matches
* 'refs/tags/v*'. We should show it as 'v1.4'.
*/
switch (subpath_matches(ref->name, item->string)) {
switch (subpath_matches(path, item->string)) {
case -1: /* did not match */
break;
case 0: /* matched fully */
@ -405,13 +406,13 @@ static int name_ref(const struct reference *ref, void *cb_data)
}
if (o && o->type == OBJ_COMMIT) {
commit = (struct commit *)o;
from_tag = starts_with(ref->name, "refs/tags/");
from_tag = starts_with(path, "refs/tags/");
if (taggerdate == TIME_MAX)
taggerdate = commit->date;
}
add_to_tip_table(ref->oid, ref->name, can_abbreviate_output,
commit, taggerdate, from_tag, deref);
add_to_tip_table(oid, path, can_abbreviate_output, commit, taggerdate,
from_tag, deref);
return 0;
}

View File

@ -22,6 +22,7 @@
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "delta-islands.h"
@ -32,7 +33,6 @@
#include "packfile.h"
#include "object-file.h"
#include "odb.h"
#include "odb/streaming.h"
#include "replace-object.h"
#include "dir.h"
#include "midx.h"
@ -404,7 +404,7 @@ static unsigned long do_compress(void **pptr, unsigned long size)
return stream.total_out;
}
static unsigned long write_large_blob_data(struct odb_read_stream *st, struct hashfile *f,
static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
const struct object_id *oid)
{
git_zstream stream;
@ -417,7 +417,7 @@ static unsigned long write_large_blob_data(struct odb_read_stream *st, struct ha
for (;;) {
ssize_t readlen;
int zret = Z_OK;
readlen = odb_read_stream_read(st, ibuf, sizeof(ibuf));
readlen = read_istream(st, ibuf, sizeof(ibuf));
if (readlen == -1)
die(_("unable to read %s"), oid_to_hex(oid));
@ -513,19 +513,17 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
unsigned hdrlen;
enum object_type type;
void *buf;
struct odb_read_stream *st = NULL;
struct git_istream *st = NULL;
const unsigned hashsz = the_hash_algo->rawsz;
if (!usable_delta) {
if (oe_type(entry) == OBJ_BLOB &&
oe_size_greater_than(&to_pack, entry,
repo_settings_get_big_file_threshold(the_repository)) &&
(st = odb_read_stream_open(the_repository->objects, &entry->idx.oid,
NULL)) != NULL) {
(st = open_istream(the_repository, &entry->idx.oid, &type,
&size, NULL)) != NULL)
buf = NULL;
type = st->type;
size = st->size;
} else {
else {
buf = odb_read_object(the_repository->objects,
&entry->idx.oid, &type,
&size);
@ -579,7 +577,7 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
dheader[--pos] = 128 | (--ofs & 127);
if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
if (st)
odb_read_stream_close(st);
close_istream(st);
free(buf);
return 0;
}
@ -593,7 +591,7 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
*/
if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
if (st)
odb_read_stream_close(st);
close_istream(st);
free(buf);
return 0;
}
@ -603,7 +601,7 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
} else {
if (limit && hdrlen + datalen + hashsz >= limit) {
if (st)
odb_read_stream_close(st);
close_istream(st);
free(buf);
return 0;
}
@ -611,7 +609,7 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
}
if (st) {
datalen = write_large_blob_data(st, f, &entry->idx.oid);
odb_read_stream_close(st);
close_istream(st);
} else {
hashwrite(f, buf, datalen);
free(buf);
@ -833,14 +831,15 @@ static enum write_one_status write_one(struct hashfile *f,
return WRITE_ONE_WRITTEN;
}
static int mark_tagged(const struct reference *ref, void *cb_data UNUSED)
static int mark_tagged(const char *path UNUSED, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
struct object_id peeled;
struct object_entry *entry = packlist_find(&to_pack, ref->oid);
struct object_entry *entry = packlist_find(&to_pack, oid);
if (entry)
entry->tagged = 1;
if (!reference_get_peeled_oid(the_repository, ref, &peeled)) {
if (!peel_iterated_oid(the_repository, oid, &peeled)) {
entry = packlist_find(&to_pack, &peeled);
if (entry)
entry->tagged = 1;
@ -1707,8 +1706,8 @@ static int want_object_in_pack_mtime(const struct object_id *oid,
uint32_t found_mtime)
{
int want;
struct packfile_list_entry *e;
struct odb_source *source;
struct list_head *pos;
if (!exclude && local) {
/*
@ -1717,7 +1716,7 @@ static int want_object_in_pack_mtime(const struct object_id *oid,
*/
struct odb_source *source = the_repository->objects->sources->next;
for (; source; source = source->next)
if (odb_source_loose_has_object(source, oid))
if (has_loose_object(source, oid))
return 0;
}
@ -1749,11 +1748,12 @@ static int want_object_in_pack_mtime(const struct object_id *oid,
}
}
for (e = the_repository->objects->packfiles->packs.head; e; e = e->next) {
struct packed_git *p = e->pack;
list_for_each(pos, packfile_store_get_packs_mru(the_repository->objects->packfiles)) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
want = want_object_in_pack_one(p, oid, exclude, found_pack, found_offset, found_mtime);
if (!exclude && want > 0)
packfile_list_prepend(&the_repository->objects->packfiles->packs, p);
list_move(&p->mru,
packfile_store_get_packs_mru(the_repository->objects->packfiles));
if (want != -1)
return want;
}
@ -3293,7 +3293,7 @@ static void add_tag_chain(const struct object_id *oid)
tag = lookup_tag(the_repository, oid);
while (1) {
if (!tag || parse_tag(the_repository, tag) || !tag->tagged)
if (!tag || parse_tag(tag) || !tag->tagged)
die(_("unable to pack objects reachable from tag %s"),
oid_to_hex(oid));
@ -3306,13 +3306,13 @@ static void add_tag_chain(const struct object_id *oid)
}
}
static int add_ref_tag(const struct reference *ref, void *cb_data UNUSED)
static int add_ref_tag(const char *tag UNUSED, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
struct object_id peeled;
if (!reference_get_peeled_oid(the_repository, ref, &peeled) &&
obj_is_packed(&peeled))
add_tag_chain(ref->oid);
if (!peel_iterated_oid(the_repository, oid, &peeled) && obj_is_packed(&peeled))
add_tag_chain(oid);
return 0;
}
@ -3978,7 +3978,7 @@ static void add_cruft_object_entry(const struct object_id *oid, enum object_type
int found = 0;
for (; !found && source; source = source->next)
if (odb_source_loose_has_object(source, oid))
if (has_loose_object(source, oid))
found = 1;
/*
@ -4389,27 +4389,27 @@ static void add_unreachable_loose_objects(struct rev_info *revs)
static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
{
static struct packed_git *last_found = NULL;
struct packfile_store *packs = the_repository->objects->packfiles;
static struct packed_git *last_found = (void *)1;
struct packed_git *p;
if (last_found && find_pack_entry_one(oid, last_found))
return 1;
p = (last_found != (void *)1) ? last_found :
packfile_store_get_packs(packs);
repo_for_each_pack(the_repository, p) {
/*
* We have already checked `last_found`, so there is no need to
* re-check here.
*/
if (p == last_found)
continue;
if ((!p->pack_local || p->pack_keep || p->pack_keep_in_core) &&
find_pack_entry_one(oid, p)) {
while (p) {
if ((!p->pack_local || p->pack_keep ||
p->pack_keep_in_core) &&
find_pack_entry_one(oid, p)) {
last_found = p;
return 1;
}
if (p == last_found)
p = packfile_store_get_packs(packs);
else
p = p->next;
if (p == last_found)
p = p->next;
}
return 0;
}
@ -4528,16 +4528,19 @@ static void record_recent_commit(struct commit *commit, void *data UNUSED)
oid_array_append(&recent_objects, &commit->object.oid);
}
static int mark_bitmap_preferred_tip(const struct reference *ref, void *data UNUSED)
static int mark_bitmap_preferred_tip(const char *refname,
const char *referent UNUSED,
const struct object_id *oid,
int flags UNUSED,
void *data UNUSED)
{
const struct object_id *maybe_peeled = ref->oid;
struct object_id peeled;
struct object *object;
if (!reference_get_peeled_oid(the_repository, ref, &peeled))
maybe_peeled = &peeled;
if (!peel_iterated_oid(the_repository, oid, &peeled))
oid = &peeled;
object = parse_object_or_die(the_repository, maybe_peeled, ref->name);
object = parse_object_or_die(the_repository, oid, refname);
if (object->type == OBJ_COMMIT)
object->flags |= NEEDS_BITMAP;
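
has_sha1_pack_kept_or_nonlocal above caches the pack that satisfied the previous lookup and probes it first, skipping it during the subsequent scan, so repeated lookups that hit the same pack stay cheap. A self-contained sketch of that last-found cache; struct pack and pack_has (a crude strstr membership test) are invented for illustration only.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct pack { const char *name; const char *contains; struct pack *next; };

    static bool pack_has(const struct pack *p, const char *oid)
    {
        return strstr(p->contains, oid) != NULL;
    }

    /* Look an object up across all packs, remembering the pack that matched
     * last time: consecutive lookups often hit the same pack, so checking it
     * first skips most of the scan. */
    static bool find_in_packs(struct pack *packs, const char *oid)
    {
        static struct pack *last_found;
        struct pack *p;

        if (last_found && pack_has(last_found, oid))
            return true;
        for (p = packs; p; p = p->next) {
            if (p == last_found)
                continue;           /* already checked above */
            if (pack_has(p, oid)) {
                last_found = p;
                return true;
            }
        }
        return false;
    }

    int main(void)
    {
        struct pack b = { "pack-b", "beef cafe", NULL };
        struct pack a = { "pack-a", "f00d", &b };

        printf("%d %d %d\n", find_in_packs(&a, "cafe"),
               find_in_packs(&a, "beef"), find_in_packs(&a, "dead"));
        return 0;
    }
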

View File

@ -119,6 +119,148 @@ static int opt_show_forced_updates = -1;
static const char *set_upstream;
static struct strvec opt_fetch = STRVEC_INIT;
static struct option pull_options[] = {
/* Shared options */
OPT__VERBOSITY(&opt_verbosity),
OPT_PASSTHRU(0, "progress", &opt_progress, NULL,
N_("force progress reporting"),
PARSE_OPT_NOARG),
OPT_CALLBACK_F(0, "recurse-submodules",
&recurse_submodules_cli, N_("on-demand"),
N_("control for recursive fetching of submodules"),
PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules),
/* Options passed to git-merge or git-rebase */
OPT_GROUP(N_("Options related to merging")),
OPT_CALLBACK_F('r', "rebase", &opt_rebase,
"(false|true|merges|interactive)",
N_("incorporate changes by rebasing rather than merging"),
PARSE_OPT_OPTARG, parse_opt_rebase),
OPT_PASSTHRU('n', NULL, &opt_diffstat, NULL,
N_("do not show a diffstat at the end of the merge"),
PARSE_OPT_NOARG | PARSE_OPT_NONEG),
OPT_PASSTHRU(0, "stat", &opt_diffstat, NULL,
N_("show a diffstat at the end of the merge"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "summary", &opt_diffstat, NULL,
N_("(synonym to --stat)"),
PARSE_OPT_NOARG | PARSE_OPT_HIDDEN),
OPT_PASSTHRU(0, "compact-summary", &opt_diffstat, NULL,
N_("show a compact-summary at the end of the merge"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "log", &opt_log, N_("n"),
N_("add (at most <n>) entries from shortlog to merge commit message"),
PARSE_OPT_OPTARG),
OPT_PASSTHRU(0, "signoff", &opt_signoff, NULL,
N_("add a Signed-off-by trailer"),
PARSE_OPT_OPTARG),
OPT_PASSTHRU(0, "squash", &opt_squash, NULL,
N_("create a single commit instead of doing a merge"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "commit", &opt_commit, NULL,
N_("perform a commit if the merge succeeds (default)"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "edit", &opt_edit, NULL,
N_("edit message before committing"),
PARSE_OPT_NOARG),
OPT_CLEANUP(&cleanup_arg),
OPT_PASSTHRU(0, "ff", &opt_ff, NULL,
N_("allow fast-forward"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "ff-only", &opt_ff, NULL,
N_("abort if fast-forward is not possible"),
PARSE_OPT_NOARG | PARSE_OPT_NONEG),
OPT_PASSTHRU(0, "verify", &opt_verify, NULL,
N_("control use of pre-merge-commit and commit-msg hooks"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "verify-signatures", &opt_verify_signatures, NULL,
N_("verify that the named commit has a valid GPG signature"),
PARSE_OPT_NOARG),
OPT_BOOL(0, "autostash", &opt_autostash,
N_("automatically stash/stash pop before and after")),
OPT_PASSTHRU_ARGV('s', "strategy", &opt_strategies, N_("strategy"),
N_("merge strategy to use"),
0),
OPT_PASSTHRU_ARGV('X', "strategy-option", &opt_strategy_opts,
N_("option=value"),
N_("option for selected merge strategy"),
0),
OPT_PASSTHRU('S', "gpg-sign", &opt_gpg_sign, N_("key-id"),
N_("GPG sign commit"),
PARSE_OPT_OPTARG),
OPT_SET_INT(0, "allow-unrelated-histories",
&opt_allow_unrelated_histories,
N_("allow merging unrelated histories"), 1),
/* Options passed to git-fetch */
OPT_GROUP(N_("Options related to fetching")),
OPT_PASSTHRU(0, "all", &opt_all, NULL,
N_("fetch from all remotes"),
PARSE_OPT_NOARG),
OPT_PASSTHRU('a', "append", &opt_append, NULL,
N_("append to .git/FETCH_HEAD instead of overwriting"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "upload-pack", &opt_upload_pack, N_("path"),
N_("path to upload pack on remote end"),
0),
OPT__FORCE(&opt_force, N_("force overwrite of local branch"), 0),
OPT_PASSTHRU('t', "tags", &opt_tags, NULL,
N_("fetch all tags and associated objects"),
PARSE_OPT_NOARG),
OPT_PASSTHRU('p', "prune", &opt_prune, NULL,
N_("prune remote-tracking branches no longer on remote"),
PARSE_OPT_NOARG),
OPT_PASSTHRU('j', "jobs", &max_children, N_("n"),
N_("number of submodules pulled in parallel"),
PARSE_OPT_OPTARG),
OPT_BOOL(0, "dry-run", &opt_dry_run,
N_("dry run")),
OPT_PASSTHRU('k', "keep", &opt_keep, NULL,
N_("keep downloaded pack"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "depth", &opt_depth, N_("depth"),
N_("deepen history of shallow clone"),
0),
OPT_PASSTHRU_ARGV(0, "shallow-since", &opt_fetch, N_("time"),
N_("deepen history of shallow repository based on time"),
0),
OPT_PASSTHRU_ARGV(0, "shallow-exclude", &opt_fetch, N_("ref"),
N_("deepen history of shallow clone, excluding ref"),
0),
OPT_PASSTHRU_ARGV(0, "deepen", &opt_fetch, N_("n"),
N_("deepen history of shallow clone"),
0),
OPT_PASSTHRU(0, "unshallow", &opt_unshallow, NULL,
N_("convert to a complete repository"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "update-shallow", &opt_update_shallow, NULL,
N_("accept refs that update .git/shallow"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "refmap", &opt_refmap, N_("refmap"),
N_("specify fetch refmap"),
PARSE_OPT_NONEG),
OPT_PASSTHRU_ARGV('o', "server-option", &opt_fetch,
N_("server-specific"),
N_("option to transmit"),
0),
OPT_PASSTHRU('4', "ipv4", &opt_ipv4, NULL,
N_("use IPv4 addresses only"),
PARSE_OPT_NOARG),
OPT_PASSTHRU('6', "ipv6", &opt_ipv6, NULL,
N_("use IPv6 addresses only"),
PARSE_OPT_NOARG),
OPT_PASSTHRU_ARGV(0, "negotiation-tip", &opt_fetch, N_("revision"),
N_("report that we have only objects reachable from this object"),
0),
OPT_BOOL(0, "show-forced-updates", &opt_show_forced_updates,
N_("check for forced-updates on all updated branches")),
OPT_PASSTHRU(0, "set-upstream", &set_upstream, NULL,
N_("set upstream for git pull/fetch"),
PARSE_OPT_NOARG),
OPT_END()
};
/**
* Pushes "-q" or "-v" switches into arr to match the opt_verbosity level.
*/
@ -866,147 +1008,6 @@ int cmd_pull(int argc,
int can_ff;
int divergent;
int ret;
static struct option pull_options[] = {
/* Shared options */
OPT__VERBOSITY(&opt_verbosity),
OPT_PASSTHRU(0, "progress", &opt_progress, NULL,
N_("force progress reporting"),
PARSE_OPT_NOARG),
OPT_CALLBACK_F(0, "recurse-submodules",
&recurse_submodules_cli, N_("on-demand"),
N_("control for recursive fetching of submodules"),
PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules),
/* Options passed to git-merge or git-rebase */
OPT_GROUP(N_("Options related to merging")),
OPT_CALLBACK_F('r', "rebase", &opt_rebase,
"(false|true|merges|interactive)",
N_("incorporate changes by rebasing rather than merging"),
PARSE_OPT_OPTARG, parse_opt_rebase),
OPT_PASSTHRU('n', NULL, &opt_diffstat, NULL,
N_("do not show a diffstat at the end of the merge"),
PARSE_OPT_NOARG | PARSE_OPT_NONEG),
OPT_PASSTHRU(0, "stat", &opt_diffstat, NULL,
N_("show a diffstat at the end of the merge"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "summary", &opt_diffstat, NULL,
N_("(synonym to --stat)"),
PARSE_OPT_NOARG | PARSE_OPT_HIDDEN),
OPT_PASSTHRU(0, "compact-summary", &opt_diffstat, NULL,
N_("show a compact-summary at the end of the merge"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "log", &opt_log, N_("n"),
N_("add (at most <n>) entries from shortlog to merge commit message"),
PARSE_OPT_OPTARG),
OPT_PASSTHRU(0, "signoff", &opt_signoff, NULL,
N_("add a Signed-off-by trailer"),
PARSE_OPT_OPTARG),
OPT_PASSTHRU(0, "squash", &opt_squash, NULL,
N_("create a single commit instead of doing a merge"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "commit", &opt_commit, NULL,
N_("perform a commit if the merge succeeds (default)"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "edit", &opt_edit, NULL,
N_("edit message before committing"),
PARSE_OPT_NOARG),
OPT_CLEANUP(&cleanup_arg),
OPT_PASSTHRU(0, "ff", &opt_ff, NULL,
N_("allow fast-forward"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "ff-only", &opt_ff, NULL,
N_("abort if fast-forward is not possible"),
PARSE_OPT_NOARG | PARSE_OPT_NONEG),
OPT_PASSTHRU(0, "verify", &opt_verify, NULL,
N_("control use of pre-merge-commit and commit-msg hooks"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "verify-signatures", &opt_verify_signatures, NULL,
N_("verify that the named commit has a valid GPG signature"),
PARSE_OPT_NOARG),
OPT_BOOL(0, "autostash", &opt_autostash,
N_("automatically stash/stash pop before and after")),
OPT_PASSTHRU_ARGV('s', "strategy", &opt_strategies, N_("strategy"),
N_("merge strategy to use"),
0),
OPT_PASSTHRU_ARGV('X', "strategy-option", &opt_strategy_opts,
N_("option=value"),
N_("option for selected merge strategy"),
0),
OPT_PASSTHRU('S', "gpg-sign", &opt_gpg_sign, N_("key-id"),
N_("GPG sign commit"),
PARSE_OPT_OPTARG),
OPT_SET_INT(0, "allow-unrelated-histories",
&opt_allow_unrelated_histories,
N_("allow merging unrelated histories"), 1),
/* Options passed to git-fetch */
OPT_GROUP(N_("Options related to fetching")),
OPT_PASSTHRU(0, "all", &opt_all, NULL,
N_("fetch from all remotes"),
PARSE_OPT_NOARG),
OPT_PASSTHRU('a', "append", &opt_append, NULL,
N_("append to .git/FETCH_HEAD instead of overwriting"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "upload-pack", &opt_upload_pack, N_("path"),
N_("path to upload pack on remote end"),
0),
OPT__FORCE(&opt_force, N_("force overwrite of local branch"), 0),
OPT_PASSTHRU('t', "tags", &opt_tags, NULL,
N_("fetch all tags and associated objects"),
PARSE_OPT_NOARG),
OPT_PASSTHRU('p', "prune", &opt_prune, NULL,
N_("prune remote-tracking branches no longer on remote"),
PARSE_OPT_NOARG),
OPT_PASSTHRU('j', "jobs", &max_children, N_("n"),
N_("number of submodules pulled in parallel"),
PARSE_OPT_OPTARG),
OPT_BOOL(0, "dry-run", &opt_dry_run,
N_("dry run")),
OPT_PASSTHRU('k', "keep", &opt_keep, NULL,
N_("keep downloaded pack"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "depth", &opt_depth, N_("depth"),
N_("deepen history of shallow clone"),
0),
OPT_PASSTHRU_ARGV(0, "shallow-since", &opt_fetch, N_("time"),
N_("deepen history of shallow repository based on time"),
0),
OPT_PASSTHRU_ARGV(0, "shallow-exclude", &opt_fetch, N_("ref"),
N_("deepen history of shallow clone, excluding ref"),
0),
OPT_PASSTHRU_ARGV(0, "deepen", &opt_fetch, N_("n"),
N_("deepen history of shallow clone"),
0),
OPT_PASSTHRU(0, "unshallow", &opt_unshallow, NULL,
N_("convert to a complete repository"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "update-shallow", &opt_update_shallow, NULL,
N_("accept refs that update .git/shallow"),
PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "refmap", &opt_refmap, N_("refmap"),
N_("specify fetch refmap"),
PARSE_OPT_NONEG),
OPT_PASSTHRU_ARGV('o', "server-option", &opt_fetch,
N_("server-specific"),
N_("option to transmit"),
0),
OPT_PASSTHRU('4', "ipv4", &opt_ipv4, NULL,
N_("use IPv4 addresses only"),
PARSE_OPT_NOARG),
OPT_PASSTHRU('6', "ipv6", &opt_ipv6, NULL,
N_("use IPv6 addresses only"),
PARSE_OPT_NOARG),
OPT_PASSTHRU_ARGV(0, "negotiation-tip", &opt_fetch, N_("revision"),
N_("report that we have only objects reachable from this object"),
0),
OPT_BOOL(0, "show-forced-updates", &opt_show_forced_updates,
N_("check for forced-updates on all updated branches")),
OPT_PASSTHRU(0, "set-upstream", &set_upstream, NULL,
N_("set upstream for git pull/fetch"),
PARSE_OPT_NOARG),
OPT_END()
};
if (!getenv("GIT_REFLOG_ACTION"))
set_reflog_message(argc, argv);

View File

@ -34,6 +34,7 @@
#include "object-file.h"
#include "object-name.h"
#include "odb.h"
#include "path.h"
#include "protocol.h"
#include "commit-reach.h"
#include "server-info.h"
@ -41,7 +42,6 @@
#include "trace2.h"
#include "worktree.h"
#include "shallow.h"
#include "setup.h"
#include "parse-options.h"
static const char * const receive_pack_usage[] = {
@ -177,9 +177,8 @@ static int receive_pack_config(const char *var, const char *value,
if (git_config_pathname(&path, var, value))
return -1;
if (path)
strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
fsck_msg_types.len ? ',' : '=', path);
strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
fsck_msg_types.len ? ',' : '=', path);
free(path);
return 0;
}
@ -306,12 +305,13 @@ static void show_ref(const char *path, const struct object_id *oid)
}
}
static int show_ref_cb(const struct reference *ref, void *data)
static int show_ref_cb(const char *path_full, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *data)
{
struct oidset *seen = data;
const char *path = strip_namespace(ref->name);
const char *path = strip_namespace(path_full);
if (ref_is_hidden(path, ref->name, &hidden_refs))
if (ref_is_hidden(path, path_full, &hidden_refs))
return 0;
/*
@ -320,13 +320,13 @@ static int show_ref_cb(const struct reference *ref, void *data)
* transfer but will otherwise ignore them.
*/
if (!path) {
if (oidset_insert(seen, ref->oid))
if (oidset_insert(seen, oid))
return 0;
path = ".have";
} else {
oidset_insert(seen, ref->oid);
oidset_insert(seen, oid);
}
show_ref(path, ref->oid);
show_ref(path, oid);
return 0;
}
@ -749,7 +749,7 @@ static int check_cert_push_options(const struct string_list *push_options)
return retval;
}
static void prepare_push_cert_sha1(struct run_hooks_opt *opt)
static void prepare_push_cert_sha1(struct child_process *proc)
{
static int already_done;
@ -775,23 +775,23 @@ static void prepare_push_cert_sha1(struct run_hooks_opt *opt)
nonce_status = check_nonce(sigcheck.payload);
}
if (!is_null_oid(&push_cert_oid)) {
strvec_pushf(&opt->env, "GIT_PUSH_CERT=%s",
strvec_pushf(&proc->env, "GIT_PUSH_CERT=%s",
oid_to_hex(&push_cert_oid));
strvec_pushf(&opt->env, "GIT_PUSH_CERT_SIGNER=%s",
strvec_pushf(&proc->env, "GIT_PUSH_CERT_SIGNER=%s",
sigcheck.signer ? sigcheck.signer : "");
strvec_pushf(&opt->env, "GIT_PUSH_CERT_KEY=%s",
strvec_pushf(&proc->env, "GIT_PUSH_CERT_KEY=%s",
sigcheck.key ? sigcheck.key : "");
strvec_pushf(&opt->env, "GIT_PUSH_CERT_STATUS=%c",
strvec_pushf(&proc->env, "GIT_PUSH_CERT_STATUS=%c",
sigcheck.result);
if (push_cert_nonce) {
strvec_pushf(&opt->env,
strvec_pushf(&proc->env,
"GIT_PUSH_CERT_NONCE=%s",
push_cert_nonce);
strvec_pushf(&opt->env,
strvec_pushf(&proc->env,
"GIT_PUSH_CERT_NONCE_STATUS=%s",
nonce_status);
if (nonce_status == NONCE_SLOP)
strvec_pushf(&opt->env,
strvec_pushf(&proc->env,
"GIT_PUSH_CERT_NONCE_SLOP=%ld",
nonce_stamp_slop);
}
@ -803,74 +803,119 @@ struct receive_hook_feed_state {
struct ref_push_report *report;
int skip_broken;
struct strbuf buf;
const struct string_list *push_options;
};
static int feed_receive_hook_cb(int hook_stdin_fd, void *pp_cb UNUSED, void *pp_task_cb)
typedef int (*feed_fn)(void *, const char **, size_t *);
static int run_and_feed_hook(const char *hook_name, feed_fn feed,
struct receive_hook_feed_state *feed_state)
{
struct receive_hook_feed_state *state = pp_task_cb;
struct command *cmd = state->cmd;
unsigned int lines_batch_size = 500;
struct child_process proc = CHILD_PROCESS_INIT;
struct async muxer;
int code;
const char *hook_path = find_hook(the_repository, hook_name);
strbuf_reset(&state->buf);
if (!hook_path)
return 0;
/* batch lines to avoid going through run-command's poll loop for each line */
for (unsigned int i = 0; i < lines_batch_size; i++) {
while (cmd &&
state->skip_broken && (cmd->error_string || cmd->did_not_exist))
cmd = cmd->next;
strvec_push(&proc.args, hook_path);
proc.in = -1;
proc.stdout_to_stderr = 1;
proc.trace2_hook_name = hook_name;
if (!cmd)
break; /* no more commands left */
if (feed_state->push_options) {
size_t i;
for (i = 0; i < feed_state->push_options->nr; i++)
strvec_pushf(&proc.env,
"GIT_PUSH_OPTION_%"PRIuMAX"=%s",
(uintmax_t)i,
feed_state->push_options->items[i].string);
strvec_pushf(&proc.env, "GIT_PUSH_OPTION_COUNT=%"PRIuMAX"",
(uintmax_t)feed_state->push_options->nr);
} else
strvec_pushf(&proc.env, "GIT_PUSH_OPTION_COUNT");
if (!state->report)
state->report = cmd->report;
if (tmp_objdir)
strvec_pushv(&proc.env, tmp_objdir_env(tmp_objdir));
if (state->report) {
struct object_id *old_oid;
struct object_id *new_oid;
const char *ref_name;
old_oid = state->report->old_oid ? state->report->old_oid : &cmd->old_oid;
new_oid = state->report->new_oid ? state->report->new_oid : &cmd->new_oid;
ref_name = state->report->ref_name ? state->report->ref_name : cmd->ref_name;
strbuf_addf(&state->buf, "%s %s %s\n",
oid_to_hex(old_oid), oid_to_hex(new_oid),
ref_name);
state->report = state->report->next;
if (!state->report)
cmd = cmd->next;
} else {
strbuf_addf(&state->buf, "%s %s %s\n",
oid_to_hex(&cmd->old_oid), oid_to_hex(&cmd->new_oid),
cmd->ref_name);
cmd = cmd->next;
}
if (use_sideband) {
memset(&muxer, 0, sizeof(muxer));
muxer.proc = copy_to_sideband;
muxer.in = -1;
code = start_async(&muxer);
if (code)
return code;
proc.err = muxer.in;
}
state->cmd = cmd;
prepare_push_cert_sha1(&proc);
if (state->buf.len > 0) {
int ret = write_in_full(hook_stdin_fd, state->buf.buf, state->buf.len);
if (ret < 0) {
if (errno == EPIPE)
return 1; /* child closed pipe */
return ret;
}
code = start_command(&proc);
if (code) {
if (use_sideband)
finish_async(&muxer);
return code;
}
return state->cmd ? 0 : 1; /* 0 = more to come, 1 = EOF */
sigchain_push(SIGPIPE, SIG_IGN);
while (1) {
const char *buf;
size_t n;
if (feed(feed_state, &buf, &n))
break;
if (write_in_full(proc.in, buf, n) < 0)
break;
}
close(proc.in);
if (use_sideband)
finish_async(&muxer);
sigchain_pop(SIGPIPE);
return finish_command(&proc);
}
static void hook_output_to_sideband(struct strbuf *output, void *cb_data UNUSED)
static int feed_receive_hook(void *state_, const char **bufp, size_t *sizep)
{
if (!output)
BUG("output must be non-NULL");
struct receive_hook_feed_state *state = state_;
struct command *cmd = state->cmd;
/* buffer might be empty for keepalives */
if (output->len)
send_sideband(1, 2, output->buf, output->len, use_sideband);
while (cmd &&
state->skip_broken && (cmd->error_string || cmd->did_not_exist))
cmd = cmd->next;
if (!cmd)
return -1; /* EOF */
if (!bufp)
return 0; /* OK, can feed something. */
strbuf_reset(&state->buf);
if (!state->report)
state->report = cmd->report;
if (state->report) {
struct object_id *old_oid;
struct object_id *new_oid;
const char *ref_name;
old_oid = state->report->old_oid ? state->report->old_oid : &cmd->old_oid;
new_oid = state->report->new_oid ? state->report->new_oid : &cmd->new_oid;
ref_name = state->report->ref_name ? state->report->ref_name : cmd->ref_name;
strbuf_addf(&state->buf, "%s %s %s\n",
oid_to_hex(old_oid), oid_to_hex(new_oid),
ref_name);
state->report = state->report->next;
if (!state->report)
state->cmd = cmd->next;
} else {
strbuf_addf(&state->buf, "%s %s %s\n",
oid_to_hex(&cmd->old_oid), oid_to_hex(&cmd->new_oid),
cmd->ref_name);
state->cmd = cmd->next;
}
if (bufp) {
*bufp = state->buf.buf;
*sizep = state->buf.len;
}
return 0;
}
static int run_receive_hook(struct command *commands,
@ -878,65 +923,47 @@ static int run_receive_hook(struct command *commands,
int skip_broken,
const struct string_list *push_options)
{
struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
struct command *iter = commands;
struct receive_hook_feed_state feed_state;
int ret;
struct receive_hook_feed_state state;
int status;
/* if there are no valid commands, don't invoke the hook at all. */
while (iter && skip_broken && (iter->error_string || iter->did_not_exist))
iter = iter->next;
if (!iter)
strbuf_init(&state.buf, 0);
state.cmd = commands;
state.skip_broken = skip_broken;
state.report = NULL;
if (feed_receive_hook(&state, NULL, NULL))
return 0;
if (push_options) {
for (int i = 0; i < push_options->nr; i++)
strvec_pushf(&opt.env, "GIT_PUSH_OPTION_%d=%s", i,
push_options->items[i].string);
strvec_pushf(&opt.env, "GIT_PUSH_OPTION_COUNT=%"PRIuMAX"",
(uintmax_t)push_options->nr);
} else {
strvec_push(&opt.env, "GIT_PUSH_OPTION_COUNT");
}
if (tmp_objdir)
strvec_pushv(&opt.env, tmp_objdir_env(tmp_objdir));
prepare_push_cert_sha1(&opt);
/* set up sideband printer */
if (use_sideband)
opt.consume_output = hook_output_to_sideband;
/* set up stdin callback */
feed_state.cmd = commands;
feed_state.skip_broken = skip_broken;
feed_state.report = NULL;
strbuf_init(&feed_state.buf, 0);
opt.feed_pipe_cb_data = &feed_state;
opt.feed_pipe = feed_receive_hook_cb;
ret = run_hooks_opt(the_repository, hook_name, &opt);
strbuf_release(&feed_state.buf);
return ret;
state.cmd = commands;
state.push_options = push_options;
status = run_and_feed_hook(hook_name, feed_receive_hook, &state);
strbuf_release(&state.buf);
return status;
}
static int run_update_hook(struct command *cmd)
{
struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
struct child_process proc = CHILD_PROCESS_INIT;
int code;
const char *hook_path = find_hook(the_repository, "update");
strvec_pushl(&opt.args,
cmd->ref_name,
oid_to_hex(&cmd->old_oid),
oid_to_hex(&cmd->new_oid),
NULL);
if (!hook_path)
return 0;
strvec_push(&proc.args, hook_path);
strvec_push(&proc.args, cmd->ref_name);
strvec_push(&proc.args, oid_to_hex(&cmd->old_oid));
strvec_push(&proc.args, oid_to_hex(&cmd->new_oid));
proc.no_stdin = 1;
proc.stdout_to_stderr = 1;
proc.err = use_sideband ? -1 : 0;
proc.trace2_hook_name = "update";
code = start_command(&proc);
if (code)
return code;
if (use_sideband)
opt.consume_output = hook_output_to_sideband;
return run_hooks_opt(the_repository, "update", &opt);
copy_to_sideband(proc.err, -1, NULL);
return finish_command(&proc);
}
static struct command *find_command_by_refname(struct command *list,
@ -1613,20 +1640,33 @@ out:
static void run_update_post_hook(struct command *commands)
{
struct command *cmd;
struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
struct child_process proc = CHILD_PROCESS_INIT;
const char *hook;
hook = find_hook(the_repository, "post-update");
if (!hook)
return;
for (cmd = commands; cmd; cmd = cmd->next) {
if (cmd->error_string || cmd->did_not_exist)
continue;
strvec_push(&opt.args, cmd->ref_name);
if (!proc.args.nr)
strvec_push(&proc.args, hook);
strvec_push(&proc.args, cmd->ref_name);
}
if (!opt.args.nr)
if (!proc.args.nr)
return;
if (use_sideband)
opt.consume_output = hook_output_to_sideband;
proc.no_stdin = 1;
proc.stdout_to_stderr = 1;
proc.err = use_sideband ? -1 : 0;
proc.trace2_hook_name = "post-update";
run_hooks_opt(the_repository, "post-update", &opt);
if (!start_command(&proc)) {
if (use_sideband)
copy_to_sideband(proc.err, -1, NULL);
finish_command(&proc);
}
}
static void check_aliased_update_internal(struct command *cmd,
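
The hunks above feed the pre-/post-receive hook one "old-oid new-oid refname" line at a time through a feed callback: returning 0 with the buffer pointers filled in means "more data", a non-zero return means end of input, and a NULL buffer pointer is only a probe asking whether anything would be fed at all (run_receive_hook uses that probe to skip starting the hook entirely). A minimal standalone sketch of that contract, in plain C with hypothetical update lines, not the git implementation:

#include <stdio.h>
#include <string.h>

/* same shape as the feed_fn typedef in the hunk above */
typedef int (*feed_fn)(void *state, const char **bufp, size_t *sizep);

struct demo_state {
    const char **lines;   /* NULL-terminated array of preformatted lines */
    size_t next;
};

/* return 0 with *bufp/*sizep set while data remains; non-zero means EOF;
 * a NULL bufp is only a probe for "would anything be fed?" */
static int demo_feed(void *state_, const char **bufp, size_t *sizep)
{
    struct demo_state *state = state_;
    const char *line = state->lines[state->next];

    if (!line)
        return -1;   /* EOF */
    if (!bufp)
        return 0;    /* probe only */
    *bufp = line;
    *sizep = strlen(line);
    state->next++;
    return 0;
}

int main(void)
{
    /* hypothetical "old-oid new-oid refname" update lines */
    const char *updates[] = {
        "000...0 111...1 refs/heads/topic\n",
        NULL,
    };
    struct demo_state st = { updates, 0 };
    feed_fn feed = demo_feed;
    const char *buf;
    size_t n;

    while (!feed(&st, &buf, &n))
        fwrite(buf, 1, n, stdout);   /* stand-in for write_in_full(proc.in, ...) */
    return 0;
}

In the real loop SIGPIPE is ignored around the writes, so a hook that exits before reading everything only ends the feed loop instead of killing receive-pack.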

View File

@ -570,14 +570,17 @@ struct branches_for_remote {
struct known_remotes *keep;
};
static int add_branch_for_removal(const struct reference *ref, void *cb_data)
static int add_branch_for_removal(const char *refname,
const char *referent UNUSED,
const struct object_id *oid UNUSED,
int flags UNUSED, void *cb_data)
{
struct branches_for_remote *branches = cb_data;
struct refspec_item refspec;
struct known_remote *kr;
memset(&refspec, 0, sizeof(refspec));
refspec.dst = (char *)ref->name;
refspec.dst = (char *)refname;
if (remote_find_tracking(branches->remote, &refspec))
return 0;
free(refspec.src);
@ -585,7 +588,7 @@ static int add_branch_for_removal(const struct reference *ref, void *cb_data)
/* don't delete a branch if another remote also uses it */
for (kr = branches->keep->list; kr; kr = kr->next) {
memset(&refspec, 0, sizeof(refspec));
refspec.dst = (char *)ref->name;
refspec.dst = (char *)refname;
if (!remote_find_tracking(kr->remote, &refspec)) {
free(refspec.src);
return 0;
@ -593,16 +596,16 @@ static int add_branch_for_removal(const struct reference *ref, void *cb_data)
}
/* don't delete non-remote-tracking refs */
if (!starts_with(ref->name, "refs/remotes/")) {
if (!starts_with(refname, "refs/remotes/")) {
/* advise user how to delete local branches */
if (starts_with(ref->name, "refs/heads/"))
if (starts_with(refname, "refs/heads/"))
string_list_append(branches->skipped,
abbrev_branch(ref->name));
abbrev_branch(refname));
/* silently skip over other non-remote refs */
return 0;
}
string_list_append(branches->branches, ref->name);
string_list_append(branches->branches, refname);
return 0;
}
@ -710,18 +713,18 @@ out:
return error;
}
static int rename_one_ref(const struct reference *ref, void *cb_data)
static int rename_one_ref(const char *old_refname, const char *referent,
const struct object_id *oid,
int flags, void *cb_data)
{
struct strbuf new_referent = STRBUF_INIT;
struct strbuf new_refname = STRBUF_INIT;
struct rename_info *rename = cb_data;
const struct object_id *oid = ref->oid;
const char *referent = ref->target;
int error;
compute_renamed_ref(rename, ref->name, &new_refname);
compute_renamed_ref(rename, old_refname, &new_refname);
if (ref->flags & REF_ISSYMREF) {
if (flags & REF_ISSYMREF) {
/*
* Stupidly enough `referent` is not pointing to the immediate
* target of a symref, but it's the recursively resolved value.
@ -729,25 +732,25 @@ static int rename_one_ref(const struct reference *ref, void *cb_data)
* unborn symrefs don't have any value for the `referent` at all.
*/
referent = refs_resolve_ref_unsafe(get_main_ref_store(the_repository),
ref->name, RESOLVE_REF_NO_RECURSE,
old_refname, RESOLVE_REF_NO_RECURSE,
NULL, NULL);
compute_renamed_ref(rename, referent, &new_referent);
oid = NULL;
}
error = ref_transaction_delete(rename->transaction, ref->name,
error = ref_transaction_delete(rename->transaction, old_refname,
oid, referent, REF_NO_DEREF, NULL, rename->err);
if (error < 0)
goto out;
error = ref_transaction_update(rename->transaction, new_refname.buf, oid, null_oid(the_hash_algo),
(ref->flags & REF_ISSYMREF) ? new_referent.buf : NULL, NULL,
(flags & REF_ISSYMREF) ? new_referent.buf : NULL, NULL,
REF_SKIP_CREATE_REFLOG | REF_NO_DEREF | REF_SKIP_OID_VERIFICATION,
NULL, rename->err);
if (error < 0)
goto out;
error = rename_one_reflog(ref->name, oid, rename);
error = rename_one_reflog(old_refname, oid, rename);
if (error < 0)
goto out;
@ -1122,16 +1125,19 @@ static void free_remote_ref_states(struct ref_states *states)
string_list_clear_func(&states->push, clear_push_info);
}
static int append_ref_to_tracked_list(const struct reference *ref, void *cb_data)
static int append_ref_to_tracked_list(const char *refname,
const char *referent UNUSED,
const struct object_id *oid UNUSED,
int flags, void *cb_data)
{
struct ref_states *states = cb_data;
struct refspec_item refspec;
if (ref->flags & REF_ISSYMREF)
if (flags & REF_ISSYMREF)
return 0;
memset(&refspec, 0, sizeof(refspec));
refspec.dst = (char *)ref->name;
refspec.dst = (char *)refname;
if (!remote_find_tracking(states->remote, &refspec)) {
string_list_append(&states->tracked, abbrev_branch(refspec.src));
free(refspec.src);
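
The callbacks in these hunks follow git's for_each_ref pattern: the iterator calls a function pointer once per ref and threads caller state through an opaque cb_data pointer; the two sides of the diff differ only in whether the ref's fields arrive packed in a struct or as the flat (refname, referent, oid, flags) list. A self-contained sketch of the flat form with a mock iterator; the names are illustrative and the object id is reduced to an opaque pointer so the example compiles on its own:

#include <stdio.h>
#include <string.h>

/* flat callback shape, with the object id left opaque for the sketch */
typedef int (*ref_cb_fn)(const char *refname, const char *referent,
                         const void *oid, int flags, void *cb_data);

struct fake_ref {
    const char *name;
    const char *target;   /* symref target, if any */
    int flags;
};

/* mock iterator: call fn for every entry, stop early on non-zero */
static int for_each_fake_ref(const struct fake_ref *refs, size_t nr,
                             ref_cb_fn fn, void *cb_data)
{
    for (size_t i = 0; i < nr; i++) {
        int ret = fn(refs[i].name, refs[i].target, NULL,
                     refs[i].flags, cb_data);
        if (ret)
            return ret;
    }
    return 0;
}

/* per-call state travels through cb_data, as with branches_for_remote */
struct count_state {
    size_t remote_tracking;
};

static int count_remote_tracking(const char *refname, const char *referent,
                                 const void *oid, int flags, void *cb_data)
{
    struct count_state *state = cb_data;

    (void)referent; (void)oid; (void)flags;   /* stand-ins for UNUSED */
    if (!strncmp(refname, "refs/remotes/", 13))
        state->remote_tracking++;
    return 0;
}

int main(void)
{
    const struct fake_ref refs[] = {
        { "refs/heads/main", NULL, 0 },
        { "refs/remotes/origin/main", NULL, 0 },
    };
    struct count_state state = { 0 };

    for_each_fake_ref(refs, 2, count_remote_tracking, &state);
    printf("remote-tracking refs: %zu\n", state.remote_tracking);
    return 0;
}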

View File

@ -488,7 +488,7 @@ int cmd_repack(int argc,
string_list_sort(&names);
odb_close(repo->objects);
close_object_store(repo->objects);
/*
* Ok we have prepared all new packfiles.

View File

@ -47,27 +47,30 @@ struct show_data {
enum replace_format format;
};
static int show_reference(const struct reference *ref, void *cb_data)
static int show_reference(const char *refname,
const char *referent UNUSED,
const struct object_id *oid,
int flag UNUSED, void *cb_data)
{
struct show_data *data = cb_data;
if (!wildmatch(data->pattern, ref->name, 0)) {
if (!wildmatch(data->pattern, refname, 0)) {
if (data->format == REPLACE_FORMAT_SHORT)
printf("%s\n", ref->name);
printf("%s\n", refname);
else if (data->format == REPLACE_FORMAT_MEDIUM)
printf("%s -> %s\n", ref->name, oid_to_hex(ref->oid));
printf("%s -> %s\n", refname, oid_to_hex(oid));
else { /* data->format == REPLACE_FORMAT_LONG */
struct object_id object;
enum object_type obj_type, repl_type;
if (repo_get_oid(data->repo, ref->name, &object))
return error(_("failed to resolve '%s' as a valid ref"), ref->name);
if (repo_get_oid(data->repo, refname, &object))
return error(_("failed to resolve '%s' as a valid ref"), refname);
obj_type = odb_read_object_info(data->repo->objects, &object, NULL);
repl_type = odb_read_object_info(data->repo->objects, ref->oid, NULL);
repl_type = odb_read_object_info(data->repo->objects, oid, NULL);
printf("%s (%s) -> %s (%s)\n", ref->name, type_name(obj_type),
oid_to_hex(ref->oid), type_name(repl_type));
printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type),
oid_to_hex(oid), type_name(repl_type));
}
}
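
show_reference() above filters replace refs by matching the name against a glob with wildmatch(), where a return of 0 means "matches". POSIX fnmatch(3) uses the same convention and serves as a stand-in in a standalone sketch (hypothetical names rather than replace refs):

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical names; 0 from fnmatch(3) means "matches", just like
     * the !wildmatch(...) test in the hunk above */
    const char *names[] = { "v1.0", "v1.1", "v2.0" };
    const char *pattern = "v1.*";

    for (int i = 0; i < 3; i++)
        if (!fnmatch(pattern, names[i], 0))
            printf("%s\n", names[i]);
    return 0;
}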

View File

@ -8,7 +8,6 @@
#include "git-compat-util.h"
#include "builtin.h"
#include "config.h"
#include "environment.h"
#include "hex.h"
#include "lockfile.h"
@ -21,11 +20,6 @@
#include <oidset.h>
#include <tree.h>
enum ref_action_mode {
REF_ACTION_UPDATE,
REF_ACTION_PRINT,
};
static const char *short_commit_name(struct repository *repo,
struct commit *commit)
{
@ -73,7 +67,7 @@ static struct commit *create_commit(struct repository *repo,
const char *message = repo_logmsg_reencode(repo, based_on,
NULL, out_enc);
const char *orig_message = NULL;
const char *exclude_gpgsig[] = { "gpgsig", "gpgsig-sha256", NULL };
const char *exclude_gpgsig[] = { "gpgsig", NULL };
commit_list_insert(parent, &parents);
extra = read_commit_extra_headers(based_on, exclude_gpgsig);
@ -290,54 +284,6 @@ static struct commit *pick_regular_commit(struct repository *repo,
return create_commit(repo, result->tree, pickme, replayed_base);
}
static enum ref_action_mode parse_ref_action_mode(const char *ref_action, const char *source)
{
if (!ref_action || !strcmp(ref_action, "update"))
return REF_ACTION_UPDATE;
if (!strcmp(ref_action, "print"))
return REF_ACTION_PRINT;
die(_("invalid %s value: '%s'"), source, ref_action);
}
static enum ref_action_mode get_ref_action_mode(struct repository *repo, const char *ref_action)
{
const char *config_value = NULL;
/* Command line option takes precedence */
if (ref_action)
return parse_ref_action_mode(ref_action, "--ref-action");
/* Check config value */
if (!repo_config_get_string_tmp(repo, "replay.refAction", &config_value))
return parse_ref_action_mode(config_value, "replay.refAction");
/* Default to update mode */
return REF_ACTION_UPDATE;
}
static int handle_ref_update(enum ref_action_mode mode,
struct ref_transaction *transaction,
const char *refname,
const struct object_id *new_oid,
const struct object_id *old_oid,
const char *reflog_msg,
struct strbuf *err)
{
switch (mode) {
case REF_ACTION_PRINT:
printf("update %s %s %s\n",
refname,
oid_to_hex(new_oid),
oid_to_hex(old_oid));
return 0;
case REF_ACTION_UPDATE:
return ref_transaction_update(transaction, refname, new_oid, old_oid,
NULL, NULL, 0, reflog_msg, err);
default:
BUG("unknown ref_action_mode %d", mode);
}
}
int cmd_replay(int argc,
const char **argv,
const char *prefix,
@ -348,8 +294,6 @@ int cmd_replay(int argc,
struct commit *onto = NULL;
const char *onto_name = NULL;
int contained = 0;
const char *ref_action = NULL;
enum ref_action_mode ref_mode;
struct rev_info revs;
struct commit *last_commit = NULL;
@ -358,15 +302,12 @@ int cmd_replay(int argc,
struct merge_result result;
struct strset *update_refs = NULL;
kh_oid_map_t *replayed_commits;
struct ref_transaction *transaction = NULL;
struct strbuf transaction_err = STRBUF_INIT;
struct strbuf reflog_msg = STRBUF_INIT;
int ret = 0;
const char *const replay_usage[] = {
const char * const replay_usage[] = {
N_("(EXPERIMENTAL!) git replay "
"([--contained] --onto <newbase> | --advance <branch>) "
"[--ref-action[=<mode>]] <revision-range>"),
"<revision-range>..."),
NULL
};
struct option replay_options[] = {
@ -377,10 +318,7 @@ int cmd_replay(int argc,
N_("revision"),
N_("replay onto given commit")),
OPT_BOOL(0, "contained", &contained,
N_("update all branches that point at commits in <revision-range>")),
OPT_STRING(0, "ref-action", &ref_action,
N_("mode"),
N_("control ref update behavior (update|print)")),
N_("advance all branches contained in revision-range")),
OPT_END()
};
@ -392,12 +330,9 @@ int cmd_replay(int argc,
usage_with_options(replay_usage, replay_options);
}
die_for_incompatible_opt2(!!advance_name_opt, "--advance",
contained, "--contained");
/* Parse ref action mode from command line or config */
ref_mode = get_ref_action_mode(repo, ref_action);
if (advance_name_opt && contained)
die(_("options '%s' and '%s' cannot be used together"),
"--advance", "--contained");
advance_name = xstrdup_or_null(advance_name_opt);
repo_init_revisions(repo, &revs, prefix);
@ -457,24 +392,6 @@ int cmd_replay(int argc,
if (!onto) /* FIXME: Should handle replaying down to root commit */
die("Replaying down to root commit is not supported yet!");
/* Build reflog message */
if (advance_name_opt)
strbuf_addf(&reflog_msg, "replay --advance %s", advance_name_opt);
else
strbuf_addf(&reflog_msg, "replay --onto %s",
oid_to_hex(&onto->object.oid));
/* Initialize ref transaction if using update mode */
if (ref_mode == REF_ACTION_UPDATE) {
transaction = ref_store_transaction_begin(get_main_ref_store(repo),
0, &transaction_err);
if (!transaction) {
ret = error(_("failed to begin ref transaction: %s"),
transaction_err.buf);
goto cleanup;
}
}
if (prepare_revision_walk(&revs) < 0) {
ret = error(_("error preparing revisions"));
goto cleanup;
@ -517,16 +434,10 @@ int cmd_replay(int argc,
if (decoration->type == DECORATION_REF_LOCAL &&
(contained || strset_contains(update_refs,
decoration->name))) {
if (handle_ref_update(ref_mode, transaction,
decoration->name,
&last_commit->object.oid,
&commit->object.oid,
reflog_msg.buf,
&transaction_err) < 0) {
ret = error(_("failed to update ref '%s': %s"),
decoration->name, transaction_err.buf);
goto cleanup;
}
printf("update %s %s %s\n",
decoration->name,
oid_to_hex(&last_commit->object.oid),
oid_to_hex(&commit->object.oid));
}
decoration = decoration->next;
}
@ -534,24 +445,10 @@ int cmd_replay(int argc,
/* In --advance mode, advance the target ref */
if (result.clean == 1 && advance_name) {
if (handle_ref_update(ref_mode, transaction, advance_name,
&last_commit->object.oid,
&onto->object.oid,
reflog_msg.buf,
&transaction_err) < 0) {
ret = error(_("failed to update ref '%s': %s"),
advance_name, transaction_err.buf);
goto cleanup;
}
}
/* Commit the ref transaction if we have one */
if (transaction && result.clean == 1) {
if (ref_transaction_commit(transaction, &transaction_err)) {
ret = error(_("failed to commit ref transaction: %s"),
transaction_err.buf);
goto cleanup;
}
printf("update %s %s %s\n",
advance_name,
oid_to_hex(&last_commit->object.oid),
oid_to_hex(&onto->object.oid));
}
merge_finalize(&merge_opt, &result);
@ -563,10 +460,6 @@ int cmd_replay(int argc,
ret = result.clean;
cleanup:
if (transaction)
ref_transaction_free(transaction);
strbuf_release(&transaction_err);
strbuf_release(&reflog_msg);
release_revisions(&revs);
free(advance_name);
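
One side of this hunk resolves the --ref-action mode with a simple precedence chain: an explicit --ref-action wins, otherwise the replay.refAction config value is consulted, otherwise it falls back to "update". A standalone sketch of that precedence, with the config lookup replaced by a plain string argument and illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum ref_action_mode { REF_ACTION_UPDATE, REF_ACTION_PRINT };

/* mirrors parse_ref_action_mode(); `source` only labels the error message */
static enum ref_action_mode parse_mode(const char *value, const char *source)
{
    if (!value || !strcmp(value, "update"))
        return REF_ACTION_UPDATE;
    if (!strcmp(value, "print"))
        return REF_ACTION_PRINT;
    fprintf(stderr, "invalid %s value: '%s'\n", source, value);
    exit(1);
}

/* command line beats config, config beats the built-in default */
static enum ref_action_mode resolve_mode(const char *cli, const char *config)
{
    if (cli)
        return parse_mode(cli, "--ref-action");
    if (config)
        return parse_mode(config, "replay.refAction");
    return REF_ACTION_UPDATE;
}

int main(void)
{
    /* hypothetical inputs: no --ref-action given, config says "print" */
    enum ref_action_mode mode = resolve_mode(NULL, "print");

    printf("%s\n", mode == REF_ACTION_PRINT ? "print" : "update");
    return 0;
}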

View File

@ -2,8 +2,6 @@
#include "builtin.h"
#include "environment.h"
#include "hex.h"
#include "odb.h"
#include "parse-options.h"
#include "path-walk.h"
#include "progress.h"
@ -17,8 +15,8 @@
#include "utf8.h"
static const char *const repo_usage[] = {
"git repo info [--format=(keyvalue|nul) | -z] [--all | <key>...]",
"git repo structure [--format=(table|keyvalue|nul) | -z]",
"git repo info [--format=(keyvalue|nul)] [-z] [<key>...]",
"git repo structure [--format=(table|keyvalue|nul)]",
NULL
};
@ -87,29 +85,13 @@ static get_value_fn *get_value_fn_for_key(const char *key)
return found ? found->get_value : NULL;
}
static void print_field(enum output_format format, const char *key,
const char *value)
{
switch (format) {
case FORMAT_KEYVALUE:
printf("%s=", key);
quote_c_style(value, NULL, stdout, 0);
putchar('\n');
break;
case FORMAT_NUL_TERMINATED:
printf("%s\n%s%c", key, value, '\0');
break;
default:
BUG("not a valid output format: %d", format);
}
}
static int print_fields(int argc, const char **argv,
struct repository *repo,
enum output_format format)
{
int ret = 0;
struct strbuf valbuf = STRBUF_INIT;
struct strbuf quotbuf = STRBUF_INIT;
for (int i = 0; i < argc; i++) {
get_value_fn *get_value;
@ -123,31 +105,28 @@ static int print_fields(int argc, const char **argv,
}
strbuf_reset(&valbuf);
strbuf_reset(&quotbuf);
get_value(repo, &valbuf);
print_field(format, key, valbuf.buf);
switch (format) {
case FORMAT_KEYVALUE:
quote_c_style(valbuf.buf, &quotbuf, NULL, 0);
printf("%s=%s\n", key, quotbuf.buf);
break;
case FORMAT_NUL_TERMINATED:
printf("%s\n%s%c", key, valbuf.buf, '\0');
break;
default:
BUG("not a valid output format: %d", format);
}
}
strbuf_release(&valbuf);
strbuf_release(&quotbuf);
return ret;
}
static int print_all_fields(struct repository *repo,
enum output_format format)
{
struct strbuf valbuf = STRBUF_INIT;
for (size_t i = 0; i < ARRAY_SIZE(repo_info_fields); i++) {
const struct field *field = &repo_info_fields[i];
strbuf_reset(&valbuf);
field->get_value(repo, &valbuf);
print_field(format, field->key, valbuf.buf);
}
strbuf_release(&valbuf);
return 0;
}
static int parse_format_cb(const struct option *opt,
const char *arg, int unset UNUSED)
{
@ -171,7 +150,6 @@ static int cmd_repo_info(int argc, const char **argv, const char *prefix,
struct repository *repo)
{
enum output_format format = FORMAT_KEYVALUE;
int all_keys = 0;
struct option options[] = {
OPT_CALLBACK_F(0, "format", &format, N_("format"),
N_("output format"),
@ -180,7 +158,6 @@ static int cmd_repo_info(int argc, const char **argv, const char *prefix,
N_("synonym for --format=nul"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
parse_format_cb),
OPT_BOOL(0, "all", &all_keys, N_("print all keys/values")),
OPT_END()
};
@ -188,13 +165,7 @@ static int cmd_repo_info(int argc, const char **argv, const char *prefix,
if (format != FORMAT_KEYVALUE && format != FORMAT_NUL_TERMINATED)
die(_("unsupported output format"));
if (all_keys && argc)
die(_("--all and <key> cannot be used together"));
if (all_keys)
return print_all_fields(repo, format);
else
return print_fields(argc, argv, repo, format);
return print_fields(argc, argv, repo, format);
}
struct ref_stats {
@ -204,19 +175,13 @@ struct ref_stats {
size_t others;
};
struct object_values {
struct object_stats {
size_t tags;
size_t commits;
size_t trees;
size_t blobs;
};
struct object_stats {
struct object_values type_counts;
struct object_values inflated_sizes;
struct object_values disk_sizes;
};
struct repo_structure {
struct ref_stats refs;
struct object_stats objects;
@ -227,7 +192,6 @@ struct stats_table {
int name_col_width;
int value_col_width;
int unit_col_width;
};
/*
@ -235,7 +199,6 @@ struct stats_table {
*/
struct stats_table_entry {
char *value;
const char *unit;
};
static void stats_table_vaddf(struct stats_table *table,
@ -256,18 +219,11 @@ static void stats_table_vaddf(struct stats_table *table,
if (name_width > table->name_col_width)
table->name_col_width = name_width;
if (!entry)
return;
if (entry->value) {
if (entry) {
int value_width = utf8_strwidth(entry->value);
if (value_width > table->value_col_width)
table->value_col_width = value_width;
}
if (entry->unit) {
int unit_width = utf8_strwidth(entry->unit);
if (unit_width > table->unit_col_width)
table->unit_col_width = unit_width;
}
}
static void stats_table_addf(struct stats_table *table, const char *format, ...)
@ -286,21 +242,7 @@ static void stats_table_count_addf(struct stats_table *table, size_t value,
va_list ap;
CALLOC_ARRAY(entry, 1);
humanise_count(value, &entry->value, &entry->unit);
va_start(ap, format);
stats_table_vaddf(table, entry, format, ap);
va_end(ap);
}
static void stats_table_size_addf(struct stats_table *table, size_t value,
const char *format, ...)
{
struct stats_table_entry *entry;
va_list ap;
CALLOC_ARRAY(entry, 1);
humanise_bytes(value, &entry->value, &entry->unit, HUMANISE_COMPACT);
entry->value = xstrfmt("%" PRIuMAX, (uintmax_t)value);
va_start(ap, format);
stats_table_vaddf(table, entry, format, ap);
@ -312,9 +254,9 @@ static inline size_t get_total_reference_count(struct ref_stats *stats)
return stats->branches + stats->remotes + stats->tags + stats->others;
}
static inline size_t get_total_object_values(struct object_values *values)
static inline size_t get_total_object_count(struct object_stats *stats)
{
return values->tags + values->commits + values->trees + values->blobs;
return stats->tags + stats->commits + stats->trees + stats->blobs;
}
static void stats_table_setup_structure(struct stats_table *table,
@ -322,9 +264,7 @@ static void stats_table_setup_structure(struct stats_table *table,
{
struct object_stats *objects = &stats->objects;
struct ref_stats *refs = &stats->refs;
size_t inflated_object_total;
size_t object_count_total;
size_t disk_object_total;
size_t object_total;
size_t ref_total;
ref_total = get_total_reference_count(refs);
@ -335,101 +275,51 @@ static void stats_table_setup_structure(struct stats_table *table,
stats_table_count_addf(table, refs->remotes, " * %s", _("Remotes"));
stats_table_count_addf(table, refs->others, " * %s", _("Others"));
object_count_total = get_total_object_values(&objects->type_counts);
object_total = get_total_object_count(objects);
stats_table_addf(table, "");
stats_table_addf(table, "* %s", _("Reachable objects"));
stats_table_count_addf(table, object_count_total, " * %s", _("Count"));
stats_table_count_addf(table, objects->type_counts.commits,
" * %s", _("Commits"));
stats_table_count_addf(table, objects->type_counts.trees,
" * %s", _("Trees"));
stats_table_count_addf(table, objects->type_counts.blobs,
" * %s", _("Blobs"));
stats_table_count_addf(table, objects->type_counts.tags,
" * %s", _("Tags"));
inflated_object_total = get_total_object_values(&objects->inflated_sizes);
stats_table_size_addf(table, inflated_object_total,
" * %s", _("Inflated size"));
stats_table_size_addf(table, objects->inflated_sizes.commits,
" * %s", _("Commits"));
stats_table_size_addf(table, objects->inflated_sizes.trees,
" * %s", _("Trees"));
stats_table_size_addf(table, objects->inflated_sizes.blobs,
" * %s", _("Blobs"));
stats_table_size_addf(table, objects->inflated_sizes.tags,
" * %s", _("Tags"));
disk_object_total = get_total_object_values(&objects->disk_sizes);
stats_table_size_addf(table, disk_object_total,
" * %s", _("Disk size"));
stats_table_size_addf(table, objects->disk_sizes.commits,
" * %s", _("Commits"));
stats_table_size_addf(table, objects->disk_sizes.trees,
" * %s", _("Trees"));
stats_table_size_addf(table, objects->disk_sizes.blobs,
" * %s", _("Blobs"));
stats_table_size_addf(table, objects->disk_sizes.tags,
" * %s", _("Tags"));
stats_table_count_addf(table, object_total, " * %s", _("Count"));
stats_table_count_addf(table, objects->commits, " * %s", _("Commits"));
stats_table_count_addf(table, objects->trees, " * %s", _("Trees"));
stats_table_count_addf(table, objects->blobs, " * %s", _("Blobs"));
stats_table_count_addf(table, objects->tags, " * %s", _("Tags"));
}
static void stats_table_print_structure(const struct stats_table *table)
{
const char *name_col_title = _("Repository structure");
const char *value_col_title = _("Value");
int title_name_width = utf8_strwidth(name_col_title);
int title_value_width = utf8_strwidth(value_col_title);
int name_col_width = table->name_col_width;
int value_col_width = table->value_col_width;
int unit_col_width = table->unit_col_width;
int name_col_width = utf8_strwidth(name_col_title);
int value_col_width = utf8_strwidth(value_col_title);
struct string_list_item *item;
struct strbuf buf = STRBUF_INIT;
if (title_name_width > name_col_width)
name_col_width = title_name_width;
if (title_value_width > value_col_width + unit_col_width + 1)
value_col_width = title_value_width - unit_col_width;
strbuf_addstr(&buf, "| ");
strbuf_utf8_align(&buf, ALIGN_LEFT, name_col_width, name_col_title);
strbuf_addstr(&buf, " | ");
strbuf_utf8_align(&buf, ALIGN_LEFT,
value_col_width + unit_col_width + 1, value_col_title);
strbuf_addstr(&buf, " |");
printf("%s\n", buf.buf);
if (table->name_col_width > name_col_width)
name_col_width = table->name_col_width;
if (table->value_col_width > value_col_width)
value_col_width = table->value_col_width;
printf("| %-*s | %-*s |\n", name_col_width, name_col_title,
value_col_width, value_col_title);
printf("| ");
for (int i = 0; i < name_col_width; i++)
putchar('-');
printf(" | ");
for (int i = 0; i < value_col_width + unit_col_width + 1; i++)
for (int i = 0; i < value_col_width; i++)
putchar('-');
printf(" |\n");
for_each_string_list_item(item, &table->rows) {
struct stats_table_entry *entry = item->util;
const char *value = "";
const char *unit = "";
if (entry) {
struct stats_table_entry *entry = item->util;
value = entry->value;
if (entry->unit)
unit = entry->unit;
}
strbuf_reset(&buf);
strbuf_addstr(&buf, "| ");
strbuf_utf8_align(&buf, ALIGN_LEFT, name_col_width, item->string);
strbuf_addstr(&buf, " | ");
strbuf_utf8_align(&buf, ALIGN_RIGHT, value_col_width, value);
strbuf_addch(&buf, ' ');
strbuf_utf8_align(&buf, ALIGN_LEFT, unit_col_width, unit);
strbuf_addstr(&buf, " |");
printf("%s\n", buf.buf);
printf("| %-*s | %*s |\n", name_col_width, item->string,
value_col_width, value);
}
strbuf_release(&buf);
}
static void stats_table_clear(struct stats_table *table)
@ -459,31 +349,13 @@ static void structure_keyvalue_print(struct repo_structure *stats,
(uintmax_t)stats->refs.others, value_delim);
printf("objects.commits.count%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.type_counts.commits, value_delim);
(uintmax_t)stats->objects.commits, value_delim);
printf("objects.trees.count%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.type_counts.trees, value_delim);
(uintmax_t)stats->objects.trees, value_delim);
printf("objects.blobs.count%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.type_counts.blobs, value_delim);
(uintmax_t)stats->objects.blobs, value_delim);
printf("objects.tags.count%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.type_counts.tags, value_delim);
printf("objects.commits.inflated_size%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.inflated_sizes.commits, value_delim);
printf("objects.trees.inflated_size%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.inflated_sizes.trees, value_delim);
printf("objects.blobs.inflated_size%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.inflated_sizes.blobs, value_delim);
printf("objects.tags.inflated_size%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.inflated_sizes.tags, value_delim);
printf("objects.commits.disk_size%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.disk_sizes.commits, value_delim);
printf("objects.trees.disk_size%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.disk_sizes.trees, value_delim);
printf("objects.blobs.disk_size%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.disk_sizes.blobs, value_delim);
printf("objects.tags.disk_size%c%" PRIuMAX "%c", key_delim,
(uintmax_t)stats->objects.disk_sizes.tags, value_delim);
(uintmax_t)stats->objects.tags, value_delim);
fflush(stdout);
}
@ -494,13 +366,16 @@ struct count_references_data {
struct progress *progress;
};
static int count_references(const struct reference *ref, void *cb_data)
static int count_references(const char *refname,
const char *referent UNUSED,
const struct object_id *oid,
int flags UNUSED, void *cb_data)
{
struct count_references_data *data = cb_data;
struct ref_stats *stats = data->stats;
size_t ref_count;
switch (ref_kind_from_refname(ref->name)) {
switch (ref_kind_from_refname(refname)) {
case FILTER_REFS_BRANCHES:
stats->branches++;
break;
@ -521,7 +396,7 @@ static int count_references(const struct reference *ref, void *cb_data)
* While iterating through references for counting, also add OIDs in
* preparation for the path walk.
*/
add_pending_oid(data->revs, NULL, ref->oid, 0);
add_pending_oid(data->revs, NULL, oid, 0);
ref_count = get_total_reference_count(stats);
display_progress(data->progress, ref_count);
@ -548,7 +423,6 @@ static void structure_count_references(struct ref_stats *stats,
}
struct count_objects_data {
struct object_database *odb;
struct object_stats *stats;
struct progress *progress;
};
@ -558,53 +432,26 @@ static int count_objects(const char *path UNUSED, struct oid_array *oids,
{
struct count_objects_data *data = cb_data;
struct object_stats *stats = data->stats;
size_t inflated_total = 0;
size_t disk_total = 0;
size_t object_count;
for (size_t i = 0; i < oids->nr; i++) {
struct object_info oi = OBJECT_INFO_INIT;
unsigned long inflated;
off_t disk;
oi.sizep = &inflated;
oi.disk_sizep = &disk;
if (odb_read_object_info_extended(data->odb, &oids->oid[i], &oi,
OBJECT_INFO_SKIP_FETCH_OBJECT |
OBJECT_INFO_QUICK) < 0)
continue;
inflated_total += inflated;
disk_total += disk;
}
switch (type) {
case OBJ_TAG:
stats->type_counts.tags += oids->nr;
stats->inflated_sizes.tags += inflated_total;
stats->disk_sizes.tags += disk_total;
stats->tags += oids->nr;
break;
case OBJ_COMMIT:
stats->type_counts.commits += oids->nr;
stats->inflated_sizes.commits += inflated_total;
stats->disk_sizes.commits += disk_total;
stats->commits += oids->nr;
break;
case OBJ_TREE:
stats->type_counts.trees += oids->nr;
stats->inflated_sizes.trees += inflated_total;
stats->disk_sizes.trees += disk_total;
stats->trees += oids->nr;
break;
case OBJ_BLOB:
stats->type_counts.blobs += oids->nr;
stats->inflated_sizes.blobs += inflated_total;
stats->disk_sizes.blobs += disk_total;
stats->blobs += oids->nr;
break;
default:
BUG("invalid object type");
}
object_count = get_total_object_values(&stats->type_counts);
object_count = get_total_object_count(stats);
display_progress(data->progress, object_count);
return 0;
@ -616,7 +463,6 @@ static void structure_count_objects(struct object_stats *stats,
{
struct path_walk_info info = PATH_WALK_INFO_INIT;
struct count_objects_data data = {
.odb = repo->objects,
.stats = stats,
};
@ -646,10 +492,6 @@ static int cmd_repo_structure(int argc, const char **argv, const char *prefix,
OPT_CALLBACK_F(0, "format", &format, N_("format"),
N_("output format"),
PARSE_OPT_NONEG, parse_format_cb),
OPT_CALLBACK_F('z', NULL, &format, NULL,
N_("synonym for --format=nul"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
parse_format_cb),
OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
OPT_END()
};
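
Both versions of the structure table work in two passes: first measure the widest name and value, then emit every row with printf field widths. A standalone ASCII-only sketch of that pattern; git additionally measures with utf8_strwidth() so multi-byte names align by display width, and the rows below are made up:

#include <stdio.h>
#include <string.h>

struct row { const char *name; const char *value; };

int main(void)
{
    /* made-up rows; measure first, then reuse the widths as printf
     * field widths, as stats_table_print_structure() does */
    const struct row rows[] = {
        { "* References",  ""   },
        { "  * Branches",  "3"  },
        { "  * Tags",      "12" },
    };
    const char *name_title = "Repository structure";
    const char *value_title = "Value";
    int name_w = (int)strlen(name_title);
    int value_w = (int)strlen(value_title);

    for (size_t i = 0; i < 3; i++) {
        if ((int)strlen(rows[i].name) > name_w)
            name_w = (int)strlen(rows[i].name);
        if ((int)strlen(rows[i].value) > value_w)
            value_w = (int)strlen(rows[i].value);
    }

    printf("| %-*s | %-*s |\n", name_w, name_title, value_w, value_title);
    for (size_t i = 0; i < 3; i++)
        printf("| %-*s | %*s |\n", name_w, rows[i].name,
               value_w, rows[i].value);
    return 0;
}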

View File

@ -217,17 +217,19 @@ static int show_default(void)
return 0;
}
static int show_reference(const struct reference *ref, void *cb_data UNUSED)
static int show_reference(const char *refname, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
if (ref_excluded(&ref_excludes, ref->name))
if (ref_excluded(&ref_excludes, refname))
return 0;
show_rev(NORMAL, ref->oid, ref->name);
show_rev(NORMAL, oid, refname);
return 0;
}
static int anti_reference(const struct reference *ref, void *cb_data UNUSED)
static int anti_reference(const char *refname, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
show_rev(REVERSED, ref->oid, ref->name);
show_rev(REVERSED, oid, refname);
return 0;
}

View File

@ -18,7 +18,6 @@
#include "commit-slab.h"
#include "date.h"
#include "wildmatch.h"
#include "prio-queue.h"
static const char*const show_branch_usage[] = {
N_("git show-branch [-a | --all] [-r | --remotes] [--topo-order | --date-order]\n"
@ -60,10 +59,11 @@ static const char *get_color_reset_code(void)
return "";
}
static struct commit *interesting(struct prio_queue *queue)
static struct commit *interesting(struct commit_list *list)
{
for (size_t i = 0; i < queue->nr; i++) {
struct commit *commit = queue->array[i].data;
while (list) {
struct commit *commit = list->item;
list = list->next;
if (commit->object.flags & UNINTERESTING)
continue;
return commit;
@ -222,18 +222,17 @@ static int mark_seen(struct commit *commit, struct commit_list **seen_p)
return 0;
}
static void join_revs(struct prio_queue *queue,
static void join_revs(struct commit_list **list_p,
struct commit_list **seen_p,
int num_rev, int extra)
{
int all_mask = ((1u << (REV_SHIFT + num_rev)) - 1);
int all_revs = all_mask & ~((1u << REV_SHIFT) - 1);
while (queue->nr) {
while (*list_p) {
struct commit_list *parents;
int still_interesting = !!interesting(queue);
struct commit *commit = prio_queue_peek(queue);
bool get_pending = true;
int still_interesting = !!interesting(*list_p);
struct commit *commit = pop_commit(list_p);
int flags = commit->object.flags & all_mask;
if (!still_interesting && extra <= 0)
@ -254,14 +253,8 @@ static void join_revs(struct prio_queue *queue,
if (mark_seen(p, seen_p) && !still_interesting)
extra--;
p->object.flags |= flags;
if (get_pending)
prio_queue_replace(queue, p);
else
prio_queue_put(queue, p);
get_pending = false;
commit_list_insert_by_date(p, list_p);
}
if (get_pending)
prio_queue_get(queue);
}
/*
@ -420,32 +413,34 @@ static int append_ref(const char *refname, const struct object_id *oid,
return 0;
}
static int append_head_ref(const struct reference *ref, void *cb_data UNUSED)
static int append_head_ref(const char *refname, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
struct object_id tmp;
int ofs = 11;
if (!starts_with(ref->name, "refs/heads/"))
if (!starts_with(refname, "refs/heads/"))
return 0;
/* If both heads/foo and tags/foo exists, get_sha1 would
* get confused.
*/
if (repo_get_oid(the_repository, ref->name + ofs, &tmp) || !oideq(&tmp, ref->oid))
if (repo_get_oid(the_repository, refname + ofs, &tmp) || !oideq(&tmp, oid))
ofs = 5;
return append_ref(ref->name + ofs, ref->oid, 0);
return append_ref(refname + ofs, oid, 0);
}
static int append_remote_ref(const struct reference *ref, void *cb_data UNUSED)
static int append_remote_ref(const char *refname, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
struct object_id tmp;
int ofs = 13;
if (!starts_with(ref->name, "refs/remotes/"))
if (!starts_with(refname, "refs/remotes/"))
return 0;
/* If both heads/foo and tags/foo exists, get_sha1 would
* get confused.
*/
if (repo_get_oid(the_repository, ref->name + ofs, &tmp) || !oideq(&tmp, ref->oid))
if (repo_get_oid(the_repository, refname + ofs, &tmp) || !oideq(&tmp, oid))
ofs = 5;
return append_ref(ref->name + ofs, ref->oid, 0);
return append_ref(refname + ofs, oid, 0);
}
static int append_tag_ref(const char *refname, const struct object_id *oid,
@ -459,26 +454,27 @@ static int append_tag_ref(const char *refname, const struct object_id *oid,
static const char *match_ref_pattern = NULL;
static int match_ref_slash = 0;
static int append_matching_ref(const struct reference *ref, void *cb_data)
static int append_matching_ref(const char *refname, const char *referent UNUSED, const struct object_id *oid,
int flag, void *cb_data)
{
/* we want to allow pattern hold/<asterisk> to show all
* branches under refs/heads/hold/, and v0.99.9? to show
* refs/tags/v0.99.9a and friends.
*/
const char *tail;
int slash = count_slashes(ref->name);
for (tail = ref->name; *tail && match_ref_slash < slash; )
int slash = count_slashes(refname);
for (tail = refname; *tail && match_ref_slash < slash; )
if (*tail++ == '/')
slash--;
if (!*tail)
return 0;
if (wildmatch(match_ref_pattern, tail, 0))
return 0;
if (starts_with(ref->name, "refs/heads/"))
return append_head_ref(ref, cb_data);
if (starts_with(ref->name, "refs/tags/"))
return append_tag_ref(ref->name, ref->oid, ref->flags, cb_data);
return append_ref(ref->name, ref->oid, 0);
if (starts_with(refname, "refs/heads/"))
return append_head_ref(refname, NULL, oid, flag, cb_data);
if (starts_with(refname, "refs/tags/"))
return append_tag_ref(refname, oid, flag, cb_data);
return append_ref(refname, oid, 0);
}
static void snarf_refs(int head, int remotes)
@ -646,8 +642,7 @@ int cmd_show_branch(int ac,
{
struct commit *rev[MAX_REVS], *commit;
char *reflog_msg[MAX_REVS] = {0};
struct commit_list *seen = NULL;
struct prio_queue queue = { compare_commits_by_commit_date };
struct commit_list *list = NULL, *seen = NULL;
unsigned int rev_mask[MAX_REVS];
int num_rev, i, extra = 0;
int all_heads = 0, all_remotes = 0;
@ -891,14 +886,14 @@ int cmd_show_branch(int ac,
*/
commit->object.flags |= flag;
if (commit->object.flags == flag)
prio_queue_put(&queue, commit);
commit_list_insert_by_date(commit, &list);
rev[num_rev] = commit;
}
for (i = 0; i < num_rev; i++)
rev_mask[i] = rev[i]->object.flags;
if (0 <= extra)
join_revs(&queue, &seen, num_rev, extra);
join_revs(&list, &seen, num_rev, extra);
commit_list_sort_by_date(&seen);
@ -1009,7 +1004,7 @@ out:
for (size_t i = 0; i < ARRAY_SIZE(reflog_msg); i++)
free(reflog_msg[i]);
free_commit_list(seen);
clear_prio_queue(&queue);
free_commit_list(list);
free(args_copy);
free(head);
return ret;
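
The show-branch hunks trade a date-sorted commit_list (commit_list_insert_by_date, a linear insert) on one side for a prio_queue keyed by commit date on the other. A toy standalone max-heap on timestamps shows the idea: the newest entry pops first, and inserts and removals cost O(log n). This is illustrative code with hypothetical commits, not git's prio_queue:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct item { time_t date; const char *name; };

struct heap {
    struct item *a;
    size_t nr, alloc;
};

/* sift up; error handling for realloc() omitted in this sketch */
static void heap_put(struct heap *h, struct item it)
{
    if (h->nr == h->alloc) {
        h->alloc = h->alloc ? 2 * h->alloc : 16;
        h->a = realloc(h->a, h->alloc * sizeof(*h->a));
    }
    size_t i = h->nr++;
    h->a[i] = it;
    while (i && h->a[(i - 1) / 2].date < h->a[i].date) {
        struct item tmp = h->a[i];
        h->a[i] = h->a[(i - 1) / 2];
        h->a[(i - 1) / 2] = tmp;
        i = (i - 1) / 2;
    }
}

/* pop the newest entry, then sift down */
static struct item heap_get(struct heap *h)
{
    struct item top = h->a[0];
    h->a[0] = h->a[--h->nr];
    for (size_t i = 0; ; ) {
        size_t l = 2 * i + 1, r = l + 1, big = i;
        if (l < h->nr && h->a[l].date > h->a[big].date) big = l;
        if (r < h->nr && h->a[r].date > h->a[big].date) big = r;
        if (big == i) break;
        struct item tmp = h->a[i]; h->a[i] = h->a[big]; h->a[big] = tmp;
        i = big;
    }
    return top;
}

int main(void)
{
    struct heap h = { NULL, 0, 0 };

    /* hypothetical commits, inserted out of commit-date order */
    heap_put(&h, (struct item){ 1700000000, "B" });
    heap_put(&h, (struct item){ 1800000000, "C" });
    heap_put(&h, (struct item){ 1600000000, "A" });

    while (h.nr) {
        struct item it = heap_get(&h);
        printf("%s %ld\n", it.name, (long)it.date);   /* newest first */
    }
    free(h.a);
    return 0;
}

commit_list_insert_by_date yields the same newest-first order, but it walks the list on every insert, which is the cost the queue form avoids when many heads are being joined.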

View File

@ -31,31 +31,31 @@ struct show_one_options {
};
static void show_one(const struct show_one_options *opts,
const struct reference *ref)
const char *refname, const struct object_id *oid)
{
const char *hex;
struct object_id peeled;
if (!odb_has_object(the_repository->objects, ref->oid,
if (!odb_has_object(the_repository->objects, oid,
HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
die("git show-ref: bad ref %s (%s)", ref->name,
oid_to_hex(ref->oid));
die("git show-ref: bad ref %s (%s)", refname,
oid_to_hex(oid));
if (opts->quiet)
return;
hex = repo_find_unique_abbrev(the_repository, ref->oid, opts->abbrev);
hex = repo_find_unique_abbrev(the_repository, oid, opts->abbrev);
if (opts->hash_only)
printf("%s\n", hex);
else
printf("%s %s\n", hex, ref->name);
printf("%s %s\n", hex, refname);
if (!opts->deref_tags)
return;
if (!reference_get_peeled_oid(the_repository, ref, &peeled)) {
if (!peel_iterated_oid(the_repository, oid, &peeled)) {
hex = repo_find_unique_abbrev(the_repository, &peeled, opts->abbrev);
printf("%s %s^{}\n", hex, ref->name);
printf("%s %s^{}\n", hex, refname);
}
}
@ -66,25 +66,26 @@ struct show_ref_data {
int show_head;
};
static int show_ref(const struct reference *ref, void *cbdata)
static int show_ref(const char *refname, const char *referent UNUSED, const struct object_id *oid,
int flag UNUSED, void *cbdata)
{
struct show_ref_data *data = cbdata;
if (data->show_head && !strcmp(ref->name, "HEAD"))
if (data->show_head && !strcmp(refname, "HEAD"))
goto match;
if (data->patterns) {
int reflen = strlen(ref->name);
int reflen = strlen(refname);
const char **p = data->patterns, *m;
while ((m = *p++) != NULL) {
int len = strlen(m);
if (len > reflen)
continue;
if (memcmp(m, ref->name + reflen - len, len))
if (memcmp(m, refname + reflen - len, len))
continue;
if (len == reflen)
goto match;
if (ref->name[reflen - len - 1] == '/')
if (refname[reflen - len - 1] == '/')
goto match;
}
return 0;
@ -93,15 +94,18 @@ static int show_ref(const struct reference *ref, void *cbdata)
match:
data->found_match++;
show_one(data->show_one_opts, ref);
show_one(data->show_one_opts, refname, oid);
return 0;
}
static int add_existing(const struct reference *ref, void *cbdata)
static int add_existing(const char *refname,
const char *referent UNUSED,
const struct object_id *oid UNUSED,
int flag UNUSED, void *cbdata)
{
struct string_list *list = (struct string_list *)cbdata;
string_list_insert(list, ref->name);
string_list_insert(list, refname);
return 0;
}
@ -175,18 +179,12 @@ static int cmd_show_ref__verify(const struct show_one_options *show_one_opts,
if ((starts_with(*refs, "refs/") || refname_is_safe(*refs)) &&
!refs_read_ref(get_main_ref_store(the_repository), *refs, &oid)) {
struct reference ref = {
.name = *refs,
.oid = &oid,
};
show_one(show_one_opts, &ref);
} else if (!show_one_opts->quiet) {
die("'%s' - not a valid ref", *refs);
} else {
return 1;
show_one(show_one_opts, *refs, &oid);
}
else if (!show_one_opts->quiet)
die("'%s' - not a valid ref", *refs);
else
return 1;
refs++;
}

View File

@ -593,12 +593,16 @@ static void print_status(unsigned int flags, char state, const char *path,
printf("\n");
}
static int handle_submodule_head_ref(const struct reference *ref, void *cb_data)
static int handle_submodule_head_ref(const char *refname UNUSED,
const char *referent UNUSED,
const struct object_id *oid,
int flags UNUSED,
void *cb_data)
{
struct object_id *output = cb_data;
if (ref->oid)
oidcpy(output, ref->oid);
if (oid)
oidcpy(output, oid);
return 0;
}
@ -1903,13 +1907,6 @@ static int determine_submodule_update_strategy(struct repository *r,
const char *val;
int ret;
/*
* NEEDSWORK: audit and ensure that update_submodule() has right
* to assume that submodule_from_path() above will always succeed.
*/
if (!sub)
BUG("update_submodule assumes a submodule exists at path (%s)",
path);
key = xstrfmt("submodule.%s.update", sub->name);
if (update) {
@ -3534,15 +3531,14 @@ static int module_add(int argc, const char **argv, const char *prefix,
}
}
if (!add_data.sm_name)
if(!add_data.sm_name)
add_data.sm_name = add_data.sm_path;
existing = submodule_from_name(the_repository,
null_oid(the_hash_algo),
add_data.sm_name);
if (existing && existing->path &&
strcmp(existing->path, add_data.sm_path)) {
if (existing && strcmp(existing->path, add_data.sm_path)) {
if (!force) {
die(_("submodule name '%s' already used for path '%s'"),
add_data.sm_name, existing->path);

View File

@ -149,11 +149,11 @@ static int verify_tag(const char *name, const char *ref UNUSED,
if (format->format)
flags = GPG_VERIFY_OMIT_STATUS;
if (gpg_verify_tag(the_repository, oid, name, flags))
if (gpg_verify_tag(oid, name, flags))
return -1;
if (format->format)
pretty_print_ref(name, oid, NULL, format);
pretty_print_ref(name, oid, format);
return 0;
}

View File

@ -363,7 +363,7 @@ struct input_zstream_data {
int status;
};
static const void *feed_input_zstream(struct odb_write_stream *in_stream,
static const void *feed_input_zstream(struct input_stream *in_stream,
unsigned long *readlen)
{
struct input_zstream_data *data = in_stream->data;
@ -393,7 +393,7 @@ static void stream_blob(unsigned long size, unsigned nr)
{
git_zstream zstream = { 0 };
struct input_zstream_data data = { 0 };
struct odb_write_stream in_stream = {
struct input_stream in_stream = {
.read = feed_input_zstream,
.data = &data,
};
@ -402,7 +402,8 @@ static void stream_blob(unsigned long size, unsigned nr)
data.zstream = &zstream;
git_inflate_init(&zstream);
if (odb_write_object_stream(the_repository->objects, &in_stream, size, &info->oid))
if (stream_loose_object(the_repository->objects->sources,
&in_stream, size, &info->oid))
die(_("failed to write object in stream"));
if (data.status != Z_STREAM_END)

View File

@ -4,8 +4,8 @@
#define USE_THE_REPOSITORY_VARIABLE
#include "builtin.h"
#include "archive.h"
#include "path.h"
#include "pkt-line.h"
#include "setup.h"
#include "sideband.h"
#include "run-command.h"
#include "strvec.h"

View File

@ -5,11 +5,11 @@
#include "gettext.h"
#include "pkt-line.h"
#include "parse-options.h"
#include "path.h"
#include "protocol.h"
#include "replace-object.h"
#include "upload-pack.h"
#include "serve.h"
#include "setup.h"
#include "commit.h"
#include "environment.h"

View File

@ -61,13 +61,13 @@ int cmd_verify_tag(int argc,
continue;
}
if (gpg_verify_tag(repo, &oid, name, flags)) {
if (gpg_verify_tag(&oid, name, flags)) {
had_error = 1;
continue;
}
if (format.format)
pretty_print_ref(name, &oid, NULL, &format);
pretty_print_ref(name, &oid, &format);
}
return had_error;
}

View File

@ -635,7 +635,11 @@ static void print_preparing_worktree_line(int detach,
*
* Returns 0 on failure and non-zero on success.
*/
static int first_valid_ref(const struct reference *ref UNUSED, void *cb_data UNUSED)
static int first_valid_ref(const char *refname UNUSED,
const char *referent UNUSED,
const struct object_id *oid UNUSED,
int flags UNUSED,
void *cb_data UNUSED)
{
return 1;
}
@ -975,18 +979,14 @@ static void show_worktree_porcelain(struct worktree *wt, int line_terminator)
fputc(line_terminator, stdout);
}
struct worktree_display {
char *path;
int width;
};
static void show_worktree(struct worktree *wt, struct worktree_display *display,
int path_maxwidth, int abbrev_len)
static void show_worktree(struct worktree *wt, int path_maxlen, int abbrev_len)
{
struct strbuf sb = STRBUF_INIT;
int cur_path_len = strlen(wt->path);
int path_adj = cur_path_len - utf8_strwidth(wt->path);
const char *reason;
strbuf_addf(&sb, "%s%*s", display->path, 1 + path_maxwidth - display->width, "");
strbuf_addf(&sb, "%-*s ", 1 + path_maxlen + path_adj, wt->path);
if (wt->is_bare)
strbuf_addstr(&sb, "(bare)");
else {
@ -1020,27 +1020,20 @@ static void show_worktree(struct worktree *wt, struct worktree_display *display,
strbuf_release(&sb);
}
static void measure_widths(struct worktree **wt, int *abbrev,
struct worktree_display **d, int *maxwidth)
static void measure_widths(struct worktree **wt, int *abbrev, int *maxlen)
{
int i, display_alloc = 0;
struct worktree_display *display = NULL;
struct strbuf buf = STRBUF_INIT;
int i;
for (i = 0; wt[i]; i++) {
int sha1_len;
ALLOC_GROW(display, i + 1, display_alloc);
quote_path(wt[i]->path, NULL, &buf, 0);
display[i].width = utf8_strwidth(buf.buf);
display[i].path = strbuf_detach(&buf, NULL);
int path_len = strlen(wt[i]->path);
if (display[i].width > *maxwidth)
*maxwidth = display[i].width;
if (path_len > *maxlen)
*maxlen = path_len;
sha1_len = strlen(repo_find_unique_abbrev(the_repository, &wt[i]->head_oid, *abbrev));
if (sha1_len > *abbrev)
*abbrev = sha1_len;
}
*d = display;
}
static int pathcmp(const void *a_, const void *b_)
@ -1086,27 +1079,21 @@ static int list(int ac, const char **av, const char *prefix,
die(_("the option '%s' requires '%s'"), "-z", "--porcelain");
else {
struct worktree **worktrees = get_worktrees();
int path_maxwidth = 0, abbrev = DEFAULT_ABBREV, i;
struct worktree_display *display = NULL;
int path_maxlen = 0, abbrev = DEFAULT_ABBREV, i;
/* sort worktrees by path but keep main worktree at top */
pathsort(worktrees + 1);
if (!porcelain)
measure_widths(worktrees, &abbrev,
&display, &path_maxwidth);
measure_widths(worktrees, &abbrev, &path_maxlen);
for (i = 0; worktrees[i]; i++) {
if (porcelain)
show_worktree_porcelain(worktrees[i],
line_terminator);
else
show_worktree(worktrees[i],
&display[i], path_maxwidth, abbrev);
show_worktree(worktrees[i], path_maxlen, abbrev);
}
for (i = 0; display && worktrees[i]; i++)
free(display[i].path);
free(display);
free_worktrees(worktrees);
}
return 0;
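
The worktree listing hunks differ in how the path column width is measured: plain strlen() counts bytes, while quote_path() plus utf8_strwidth() measures display columns, which is what %-*s-style padding actually needs for non-ASCII paths. A standalone approximation using POSIX wcswidth(3); it assumes a UTF-8 locale and is only a rough stand-in for git's utf8_strwidth():

#define _XOPEN_SOURCE 700
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <wchar.h>

/* rough stand-in for utf8_strwidth(): display columns, not bytes */
static int display_width(const char *s)
{
    wchar_t wbuf[256];
    size_t n = mbstowcs(wbuf, s, 256);

    if (n == (size_t)-1)
        return (int)strlen(s);   /* invalid sequence: fall back to bytes */
    return wcswidth(wbuf, n);
}

int main(void)
{
    /* hypothetical worktree paths; the accented one is longer in bytes
     * than in columns */
    const char *paths[] = { "/work/été", "/work/main" };

    setlocale(LC_ALL, "");
    for (int i = 0; i < 2; i++)
        printf("%zu bytes, %d columns: %s\n",
               strlen(paths[i]), display_width(paths[i]), paths[i]);
    return 0;
}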

View File

@ -548,41 +548,12 @@ void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
trace2_region_leave("cache_tree", "write", the_repository);
}
static int parse_int(const char **ptr, unsigned long *len_p, int *out)
{
const char *s = *ptr;
unsigned long len = *len_p;
int ret = 0;
int sign = 1;
while (len && *s == '-') {
sign *= -1;
s++;
len--;
}
while (len) {
if (!isdigit(*s))
break;
ret *= 10;
ret += *s - '0';
s++;
len--;
}
if (s == *ptr)
return -1;
*ptr = s;
*len_p = len;
*out = sign * ret;
return 0;
}
static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
const char *buf = *buffer;
unsigned long size = *size_p;
const char *cp;
char *ep;
struct cache_tree *it;
int i, subtree_nr;
const unsigned rawsz = the_hash_algo->rawsz;
@ -598,14 +569,19 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
buf++; size--;
it = cache_tree();
if (parse_int(&buf, &size, &it->entry_count) < 0)
cp = buf;
it->entry_count = strtol(cp, &ep, 10);
if (cp == ep)
goto free_return;
if (!size || *buf != ' ')
cp = ep;
subtree_nr = strtol(cp, &ep, 10);
if (cp == ep)
goto free_return;
buf++; size--;
if (parse_int(&buf, &size, &subtree_nr) < 0)
goto free_return;
if (!size || *buf != '\n')
while (size && *buf && *buf != '\n') {
size--;
buf++;
}
if (!size)
goto free_return;
buf++; size--;
if (0 <= it->entry_count) {

View File

@ -25,24 +25,6 @@ void chdir_notify_register(const char *name,
list_add_tail(&e->list, &chdir_notify_entries);
}
void chdir_notify_unregister(const char *name, chdir_notify_callback cb,
void *data)
{
struct list_head *pos, *p;
list_for_each_safe(pos, p, &chdir_notify_entries) {
struct chdir_notify_entry *e =
list_entry(pos, struct chdir_notify_entry, list);
if (e->cb != cb || e->data != data || !e->name != !name ||
(e->name && strcmp(e->name, name)))
continue;
list_del(pos);
free(e);
}
}
static void reparent_cb(const char *name,
const char *old_cwd,
const char *new_cwd,

View File

@ -41,8 +41,6 @@ typedef void (*chdir_notify_callback)(const char *name,
const char *new_cwd,
void *data);
void chdir_notify_register(const char *name, chdir_notify_callback cb, void *data);
void chdir_notify_unregister(const char *name, chdir_notify_callback cb,
void *data);
void chdir_notify_reparent(const char *name, char **path);
/*

View File

@ -109,7 +109,7 @@ macos-*)
brew link --force gettext
mkdir -p "$CUSTOM_PATH"
wget -q "$P4WHENCE/bin.macosx12arm64/helix-core-server.tgz" &&
wget -q "$P4WHENCE/bin.macosx1015x86_64/helix-core-server.tgz" &&
tar -xf helix-core-server.tgz -C "$CUSTOM_PATH" p4 p4d &&
sudo xattr -d com.apple.quarantine "$CUSTOM_PATH/p4" "$CUSTOM_PATH/p4d" 2>/dev/null || true
rm helix-core-server.tgz

View File

@ -1,13 +0,0 @@
#!/bin/sh
# We must load the build options so we know where to find
# things like TEST_OUTPUT_DIRECTORY. This has to come before
# loading lib.sh, though, because it may clobber some CI lib
# variables like our custom GIT_TEST_OPTS.
. "$1"/GIT-BUILD-OPTIONS
. ${0%/*}/lib.sh
group "Run tests" \
meson test -C "$1" --no-rebuild --print-errorlogs \
--test-args="$GIT_TEST_OPTS" --slice "$((1+$2))/$3" ||
handle_failed_tests

View File

@ -1851,16 +1851,18 @@ struct refs_cb_data {
struct progress *progress;
};
static int add_ref_to_set(const struct reference *ref, void *cb_data)
static int add_ref_to_set(const char *refname UNUSED,
const char *referent UNUSED,
const struct object_id *oid,
int flags UNUSED, void *cb_data)
{
const struct object_id *maybe_peeled = ref->oid;
struct object_id peeled;
struct refs_cb_data *data = (struct refs_cb_data *)cb_data;
if (!reference_get_peeled_oid(data->repo, ref, &peeled))
maybe_peeled = &peeled;
if (odb_read_object_info(data->repo->objects, maybe_peeled, NULL) == OBJ_COMMIT)
oidset_insert(data->commits, maybe_peeled);
if (!peel_iterated_oid(data->repo, oid, &peeled))
oid = &peeled;
if (odb_read_object_info(data->repo->objects, oid, NULL) == OBJ_COMMIT)
oidset_insert(data->commits, oid);
display_progress(data->progress, oidset_size(data->commits));

View File

@ -1315,8 +1315,7 @@ free_return:
free(buf);
}
int verify_commit_buffer(const char *buffer, size_t size,
struct signature_check *sigc)
int check_commit_signature(const struct commit *commit, struct signature_check *sigc)
{
struct strbuf payload = STRBUF_INIT;
struct strbuf signature = STRBUF_INIT;
@ -1324,8 +1323,7 @@ int verify_commit_buffer(const char *buffer, size_t size,
sigc->result = 'N';
if (parse_buffer_signed_by_header(buffer, size, &payload,
&signature, the_hash_algo) <= 0)
if (parse_signed_commit(commit, &payload, &signature, the_hash_algo) <= 0)
goto out;
sigc->payload_type = SIGNATURE_PAYLOAD_COMMIT;
@ -1339,17 +1337,6 @@ int verify_commit_buffer(const char *buffer, size_t size,
return ret;
}
int check_commit_signature(const struct commit *commit, struct signature_check *sigc)
{
unsigned long size;
const char *buffer = repo_get_commit_buffer(the_repository, commit, &size);
int ret = verify_commit_buffer(buffer, size, sigc);
repo_unuse_commit_buffer(the_repository, commit, buffer);
return ret;
}
void verify_merge_signature(struct commit *commit, int verbosity,
int check_trust)
{
@ -1978,9 +1965,6 @@ int run_commit_hook(int editor_is_used, const char *index_file,
strvec_push(&opt.args, arg);
va_end(args);
/* All commit hook use-cases require ungrouping child output. */
opt.ungroup = 1;
opt.invoked_hook = invoked_hook;
return run_hooks_opt(the_repository, name, &opt);
}

View File

@ -333,13 +333,6 @@ int remove_signature(struct strbuf *buf);
*/
int check_commit_signature(const struct commit *commit, struct signature_check *sigc);
/*
* Same as check_commit_signature() but accepts a commit buffer and
* its size, instead of a `struct commit *`.
*/
int verify_commit_buffer(const char *buffer, size_t size,
struct signature_check *sigc);
/* record author-date for each commit object */
struct author_date_slab;
void record_author_date(struct author_date_slab *author_date,

Some files were not shown because too many files have changed in this diff.