(The diff below was truncated by the review tool; the final file is cut off.)
@@ -0,0 +1,60 @@

# Instructions:
#
# 1. cargo install --version 0.5.0 pyoxidizer
# 2. cd /path/to/hg
# 3. pyoxidizer build --path contrib/packaging [--release]
# 4. Run build/pyoxidizer/<arch>/<debug|release>/app/hg
#
# If you need to build again, you need to remove the build/lib.* and
# build/temp.* directories, otherwise PyOxidizer fails to pick up C
# extensions. This is a bug in PyOxidizer.

ROOT = CWD + "/../.."

set_build_path(ROOT + "/build/pyoxidizer")

def make_exe():
    dist = default_python_distribution()

    code = "import hgdemandimport; hgdemandimport.enable(); from mercurial import dispatch; dispatch.run()"

    config = PythonInterpreterConfig(
        raw_allocator = "system",
        run_eval = code,
        # We want to let the user load extensions from the file system
        filesystem_importer = True,
        # We need this to make resourceutil happy, since it looks for sys.frozen.
        sys_frozen = True,
        legacy_windows_stdio = True,
    )

    exe = dist.to_python_executable(
        name = "hg",
        config = config,
    )

    # Use setup.py install to build Mercurial and collect Python resources to
    # embed in the executable.
    resources = dist.setup_py_install(ROOT)
    exe.add_python_resources(resources)

    return exe

def make_install(exe):
    m = FileManifest()

    # `hg` goes in root directory.
    m.add_python_resource(".", exe)

    templates = glob(
        include=[ROOT + "/mercurial/templates/**/*"],
        strip_prefix = ROOT + "/mercurial/",
    )
    m.add_manifest(templates)

    return m

register_target("exe", make_exe)
register_target("app", make_install, depends = ["exe"], default = True)

resolve_targets()
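The `run_eval` payload in the config above is a one-line program; unpacked, the embedded interpreter effectively runs this launcher (the same statements, shown here only for readability):

```python
import hgdemandimport

# Enable Mercurial's lazy importer before anything else is imported.
hgdemandimport.enable()

from mercurial import dispatch

# Hand control to hg's normal command dispatcher.
dispatch.run()
```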
@@ -0,0 +1,93 @@

#!/usr/bin/env python
#
# A small script to automatically reject idle Diffs
#
# You need to set the PHABBOT_USER and PHABBOT_TOKEN environment variables
# for authentication.
from __future__ import absolute_import, print_function

import datetime
import os
import sys

import phabricator

MESSAGE = """There seems to have been no activity on this Diff for the past 3 months.

By policy, we are automatically moving it out of the `need-review` state.

Please move it back to `need-review` without hesitation if this diff should still be discussed.

:baymax:need-review-idle:
"""


PHAB_URL = "https://phab.mercurial-scm.org/api/"
USER = os.environ.get("PHABBOT_USER", "baymax")
TOKEN = os.environ.get("PHABBOT_TOKEN")


NOW = datetime.datetime.now()

# 3 months in seconds
DELAY = 60 * 60 * 24 * 30 * 3


def get_all_diff(phab):
    """Fetch all the Diffs that need review"""
    return phab.differential.query(
        status="status-needs-review",
        order="order-modified",
        paths=[('HG', None)],
    )


def filter_diffs(diffs, older_than):
    """Filter diffs to only keep the ones unmodified for <older_than> seconds"""
    olds = []
    for d in diffs:
        modified = int(d['dateModified'])
        modified = datetime.datetime.fromtimestamp(modified)
        d["idleFor"] = idle_for = NOW - modified
        if idle_for.total_seconds() > older_than:
            olds.append(d)
    return olds


def nudge_diff(phab, diff):
    """Comment on the idle diff and reject it"""
    diff_id = int(diff['id'])
    phab.differential.createcomment(
        revision_id=diff_id, message=MESSAGE, action="reject"
    )


if not USER:
    print(
        "no user specified; please set PHABBOT_USER and PHABBOT_TOKEN",
        file=sys.stderr,
    )
    sys.exit(1)
elif not TOKEN:
    print(
        "no api-token specified; please set PHABBOT_USER and PHABBOT_TOKEN",
        file=sys.stderr,
    )
    sys.exit(1)

phab = phabricator.Phabricator(USER, host=PHAB_URL, token=TOKEN)
phab.connect()
phab.update_interfaces()
print('Hello "%s".' % phab.user.whoami()['realName'])

diffs = get_all_diff(phab)
print("Found %d Diffs" % len(diffs))
olds = filter_diffs(diffs, DELAY)
print("Found %d old Diffs" % len(olds))
for d in olds:
    diff_id = d['id']
    status = d['statusName']
    modified = int(d['dateModified'])
    idle_for = d["idleFor"]
    msg = 'nudging D%s in "%s" state for %s'
    print(msg % (diff_id, status, idle_for))
    # note: this call actually updates the Diff on Phabricator
    nudge_diff(phab, d)
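For reference, the age computation inside `filter_diffs()` reduces to this self-contained check (a sketch that mirrors the logic above, not the script itself; the 120-day figure is an arbitrary example):

```python
import datetime
import time

DELAY = 60 * 60 * 24 * 30 * 3  # 3 months in seconds, as above

now = datetime.datetime.now()
four_months_ago = now - datetime.timedelta(days=120)
# 'dateModified' arrives as an epoch timestamp, like this one:
modified = datetime.datetime.fromtimestamp(
    time.mktime(four_months_ago.timetuple())
)
idle_for = now - modified
assert idle_for.total_seconds() > DELAY  # such a Diff would be nudged
```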
@@ -0,0 +1,219 @@

# Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""export repositories as git fast-import stream"""

# The format specification for fast-import streams can be found at
# https://git-scm.com/docs/git-fast-import#_input_format

from __future__ import absolute_import
import re

from mercurial.i18n import _
from mercurial.node import hex, nullrev
from mercurial.utils import stringutil
from mercurial import (
    error,
    pycompat,
    registrar,
    scmutil,
)
from .convert import convcmd

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b"ships-with-hg-core"

cmdtable = {}
command = registrar.command(cmdtable)

GIT_PERSON_PROHIBITED = re.compile(b'[<>\n"]')
GIT_EMAIL_PROHIBITED = re.compile(b"[<> \n]")

def convert_to_git_user(authormap, user, rev):
    mapped_user = authormap.get(user, user)
    user_person = stringutil.person(mapped_user)
    user_email = stringutil.email(mapped_user)
    # search, not match: a prohibited character anywhere in the name or
    # email must be rejected, not just at the start.
    if GIT_EMAIL_PROHIBITED.search(user_email) or GIT_PERSON_PROHIBITED.search(
        user_person
    ):
        raise error.Abort(
            _(b"Unable to parse user into person and email for revision %s")
            % rev
        )
    if user_person:
        return b'"' + user_person + b'" <' + user_email + b'>'
    else:
        return b"<" + user_email + b">"


def convert_to_git_date(date):
    timestamp, utcoff = date
    tzsign = b"+" if utcoff < 0 else b"-"
    if utcoff % 60 != 0:
        raise error.Abort(
            _(b"UTC offset in %s is not an integer number of seconds")
            % stringutil.pprint(date)
        )
    utcoff = abs(utcoff) // 60
    tzh = utcoff // 60
    tzmin = utcoff % 60
    return b"%d " % int(timestamp) + tzsign + b"%02d%02d" % (tzh, tzmin)


def convert_to_git_ref(branch):
    # XXX filter/map depending on git restrictions
    return b"refs/heads/" + branch


def write_data(buf, data, skip_newline):
    buf.append(b"data %d\n" % len(data))
    buf.append(data)
    if not skip_newline or data[-1:] != b"\n":
        buf.append(b"\n")


def export_commit(ui, repo, rev, marks, authormap):
    ctx = repo[rev]
    revid = ctx.hex()
    if revid in marks:
        ui.warn(_(b"warning: revision %s already exported, skipped\n") % revid)
        return
    parents = [p for p in ctx.parents() if p.rev() != nullrev]
    for p in parents:
        if p.hex() not in marks:
            ui.warn(
                _(b"warning: parent %s of %s has not been exported, skipped\n")
                % (p, revid)
            )
            return

    # For all files modified by the commit, check if they have already
    # been exported and otherwise dump the blob with the new mark.
    for fname in ctx.files():
        if fname not in ctx:
            continue
        filectx = ctx.filectx(fname)
        filerev = hex(filectx.filenode())
        if filerev not in marks:
            mark = len(marks) + 1
            marks[filerev] = mark
            data = filectx.data()
            buf = [b"blob\n", b"mark :%d\n" % mark]
            write_data(buf, data, False)
            ui.write(*buf, keepprogressbar=True)
            del buf
    # Assign a mark for the current revision for references by
    # later merge commits.
    mark = len(marks) + 1
    marks[revid] = mark

    ref = convert_to_git_ref(ctx.branch())
    buf = [
        b"commit %s\n" % ref,
        b"mark :%d\n" % mark,
        b"committer %s %s\n"
        % (
            convert_to_git_user(authormap, ctx.user(), revid),
            convert_to_git_date(ctx.date()),
        ),
    ]
    write_data(buf, ctx.description(), True)
    if parents:
        buf.append(b"from :%d\n" % marks[parents[0].hex()])
    if len(parents) == 2:
        buf.append(b"merge :%d\n" % marks[parents[1].hex()])
        p0ctx = repo[parents[0]]
        files = ctx.manifest().diff(p0ctx.manifest())
    else:
        files = ctx.files()
    filebuf = []
    for fname in files:
        if fname not in ctx:
            filebuf.append((fname, b"D %s\n" % fname))
        else:
            filectx = ctx.filectx(fname)
            filerev = filectx.filenode()
            fileperm = b"755" if filectx.isexec() else b"644"
            changed = b"M %s :%d %s\n" % (fileperm, marks[hex(filerev)], fname)
            filebuf.append((fname, changed))
    filebuf.sort()
    buf.extend(changed for (fname, changed) in filebuf)
    del filebuf
    buf.append(b"\n")
    ui.write(*buf, keepprogressbar=True)
    del buf


isrev = re.compile(b"^[0-9a-f]{40}$")


@command(
    b"fastexport",
    [
        (b"r", b"rev", [], _(b"revisions to export"), _(b"REV")),
        (b"i", b"import-marks", b"", _(b"old marks file to read"), _(b"FILE")),
        (b"e", b"export-marks", b"", _(b"new marks file to write"), _(b"FILE")),
        (
            b"A",
            b"authormap",
            b"",
            _(b"remap usernames using this file"),
            _(b"FILE"),
        ),
    ],
    _(b"[OPTION]... [REV]..."),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def fastexport(ui, repo, *revs, **opts):
    """export repository as git fast-import stream

    This command lets you dump a repository as a human-readable text stream.
    It can be piped into corresponding import routines like "git fast-import".
    Incremental dumps can be created by using marks files.
    """
    opts = pycompat.byteskwargs(opts)

    revs += tuple(opts.get(b"rev", []))
    if not revs:
        revs = scmutil.revrange(repo, [b":"])
    else:
        revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(_(b"no revisions matched"))
    authorfile = opts.get(b"authormap")
    if authorfile:
        authormap = convcmd.readauthormap(ui, authorfile)
    else:
        authormap = {}

    import_marks = opts.get(b"import_marks")
    marks = {}
    if import_marks:
        with open(import_marks, "rb") as import_marks_file:
            for line in import_marks_file:
                line = line.strip()
                if not isrev.match(line) or line in marks:
                    raise error.Abort(_(b"Corrupted marks file"))
                marks[line] = len(marks) + 1

    revs.sort()
    with ui.makeprogress(
        _(b"exporting"), unit=_(b"revisions"), total=len(revs)
    ) as progress:
        for rev in revs:
            export_commit(ui, repo, rev, marks, authormap)
            progress.increment()

    export_marks = opts.get(b"export_marks")
    if export_marks:
        with open(export_marks, "wb") as export_marks_file:
            output_marks = [None] * len(marks)
            for k, v in marks.items():
                output_marks[v - 1] = k
            for k in output_marks:
                export_marks_file.write(k + b"\n")
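As a concrete illustration of `convert_to_git_date()` above: Mercurial date tuples store the UTC offset in seconds west of UTC, so the sign flips when producing git's `+HHMM`/`-HHMM` form. A self-contained sketch of the same conversion, minus the error path:

```python
def to_git_date(date):
    # Same arithmetic as convert_to_git_date() above.
    timestamp, utcoff = date
    tzsign = b"+" if utcoff < 0 else b"-"
    utcoff = abs(utcoff) // 60
    return b"%d %s%02d%02d" % (int(timestamp), tzsign, utcoff // 60, utcoff % 60)


# UTC+01:00 is stored as -3600 seconds in an hg date tuple:
assert to_git_date((1583220000, -3600)) == b"1583220000 +0100"
# UTC-07:00 (e.g. PDT) is stored as +25200:
assert to_git_date((1583220000, 25200)) == b"1583220000 -0700"
```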
@@ -0,0 +1,30 @@

Octopus Merge Support
=====================

This will be moderately complicated, as we'll need to synthesize phony
changeset entries to explode the octopus into "revisions" that only
have two parents each. For today, we can probably just do something like

  aaaaaaaaaaaaaaaaaaXX{20 bytes of exploded node's hex sha}

where XX is a counter (so we could have as many as 255 parents in a
git commit - more than I think we'd ever see.) That means that we can
install some check in this extension to disallow checking out or
otherwise interacting with the `aaaaaaaaaaaaaaaaaa` revisions.


Interface Creation
==================

We at least need an interface definition for `changelog` in core that
this extension can satisfy, and again for `basicstore`.


Reason About Locking
====================

We should spend some time thinking hard about locking, especially on
.git/index etc. We're probably adequately locking the _git_
repository, but may not have enough locking correctness in places
where hg does locking that git isn't aware of (notably the working
copy, which I believe Git does not lock.)
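Returning to the octopus note above: the phony-node scheme it sketches could look something like this. This is purely hypothetical (nothing here is implemented); it just makes the "18 filler characters, two-hex-digit counter, 20-character prefix of the real sha" layout concrete:

```python
def phony_node_hex(real_hex, counter):
    # 18 'a's + 2 hex digits + 20 chars of the real sha = 40 hex chars,
    # the length of a Mercurial node. Hypothetical helper, per the note.
    assert 0 <= counter <= 0xFF
    return b"a" * 18 + b"%02x" % counter + real_hex[:20]


assert len(phony_node_hex(b"f" * 40, 3)) == 40
assert phony_node_hex(b"f" * 40, 3).startswith(b"a" * 18 + b"03")
```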
@@ -0,0 +1,318 @@

"""grant Mercurial the ability to operate on Git repositories. (EXPERIMENTAL)

This is currently super experimental. It probably will consume your
firstborn a la Rumpelstiltskin, etc.
"""

from __future__ import absolute_import

import os

from mercurial.i18n import _

from mercurial import (
    commands,
    error,
    extensions,
    localrepo,
    pycompat,
    scmutil,
    store,
    util,
)

from . import (
    dirstate,
    gitlog,
    gitutil,
    index,
)

# TODO: extract an interface for this in core
class gitstore(object):  # store.basicstore):
    def __init__(self, path, vfstype):
        self.vfs = vfstype(path)
        self.path = self.vfs.base
        self.createmode = store._calcmode(self.vfs)
        # above lines should go away in favor of:
        # super(gitstore, self).__init__(path, vfstype)

        self.git = gitutil.get_pygit2().Repository(
            os.path.normpath(os.path.join(path, b'..', b'.git'))
        )
        self._progress_factory = lambda *args, **kwargs: None

    @util.propertycache
    def _db(self):
        # We lazy-create the database because we want to thread a
        # progress callback down to the indexing process if it's
        # required, and we don't have a ui handle in makestore().
        return index.get_index(self.git, self._progress_factory)

    def join(self, f):
        """Fake store.join method for git repositories.

        For the most part, store.join is used for @storecache
        decorators to invalidate caches when various files
        change. We'll map the ones we care about, and ignore the rest.
        """
        if f in (b'00changelog.i', b'00manifest.i'):
            # This is close enough: in order for the changelog cache
            # to be invalidated, HEAD will have to change.
            return os.path.join(self.path, b'HEAD')
        elif f == b'lock':
            # TODO: we probably want to map this to a git lock, I
            # suspect index.lock. We should figure out what the
            # most-alike file is in git-land. For now we're risking
            # bad concurrency errors if another git client is used.
            return os.path.join(self.path, b'hgit-bogus-lock')
        elif f in (b'obsstore', b'phaseroots', b'narrowspec', b'bookmarks'):
            return os.path.join(self.path, b'..', b'.hg', f)
        raise NotImplementedError(b'Need to pick file for %s.' % f)

    def changelog(self, trypending):
        # TODO we don't have a plan for trypending in hg's git support yet
        return gitlog.changelog(self.git, self._db)

    def manifestlog(self, repo, storenarrowmatch):
        # TODO handle storenarrowmatch and figure out if we need the repo arg
        return gitlog.manifestlog(self.git, self._db)

    def invalidatecaches(self):
        pass

    def write(self, tr=None):
        # normally this handles things like fncache writes, which we don't have
        pass


def _makestore(orig, requirements, storebasepath, vfstype):
    if b'git' in requirements:
        if not os.path.exists(os.path.join(storebasepath, b'..', b'.git')):
            raise error.Abort(
                _(
                    b'repository specified git format in '
                    b'.hg/requires but has no .git directory'
                )
            )
        # Check for presence of pygit2 only here. The assumption is that we'll
        # run this code iff we'll later need pygit2.
        if gitutil.get_pygit2() is None:
            raise error.Abort(
                _(
                    b'the git extension requires the Python '
                    b'pygit2 library to be installed'
                )
            )

        return gitstore(storebasepath, vfstype)
    return orig(requirements, storebasepath, vfstype)


class gitfilestorage(object):
    def file(self, path):
        if path[0:1] == b'/':
            path = path[1:]
        return gitlog.filelog(self.store.git, self.store._db, path)


def _makefilestorage(orig, requirements, features, **kwargs):
    store = kwargs['store']
    if isinstance(store, gitstore):
        return gitfilestorage
    return orig(requirements, features, **kwargs)


def _setupdothg(ui, path):
    dothg = os.path.join(path, b'.hg')
    if os.path.exists(dothg):
        ui.warn(_(b'git repo already initialized for hg\n'))
    else:
        os.mkdir(os.path.join(path, b'.hg'))
        # TODO is it ok to extend .git/info/exclude like this?
        with open(
            os.path.join(path, b'.git', b'info', b'exclude'), 'ab'
        ) as exclude:
            exclude.write(b'\n.hg\n')
    with open(os.path.join(dothg, b'requires'), 'wb') as f:
        f.write(b'git\n')


_BMS_PREFIX = 'refs/heads/'


class gitbmstore(object):
    def __init__(self, gitrepo):
        self.gitrepo = gitrepo
        self._aclean = True
        self._active = gitrepo.references['HEAD']  # git head, not mark

    def __contains__(self, name):
        return (
            _BMS_PREFIX + pycompat.fsdecode(name)
        ) in self.gitrepo.references

    def __iter__(self):
        for r in self.gitrepo.listall_references():
            if r.startswith(_BMS_PREFIX):
                yield pycompat.fsencode(r[len(_BMS_PREFIX) :])

    def __getitem__(self, k):
        return (
            self.gitrepo.references[_BMS_PREFIX + pycompat.fsdecode(k)]
            .peel()
            .id.raw
        )

    def get(self, k, default=None):
        try:
            if k in self:
                return self[k]
            return default
        except gitutil.get_pygit2().InvalidSpecError:
            return default

    @property
    def active(self):
        h = self.gitrepo.references['HEAD']
        if not isinstance(h.target, str) or not h.target.startswith(
            _BMS_PREFIX
        ):
            return None
        return pycompat.fsencode(h.target[len(_BMS_PREFIX) :])

    @active.setter
    def active(self, mark):
        # mark is bytes on the hg side; git reference names are str.
        githead = (
            _BMS_PREFIX + pycompat.fsdecode(mark) if mark is not None else None
        )
        if githead is not None and githead not in self.gitrepo.references:
            raise AssertionError(b'bookmark %s does not exist!' % mark)

        self._active = githead
        self._aclean = False

    def _writeactive(self):
        if self._aclean:
            return
        self.gitrepo.references.create('HEAD', self._active, True)
        self._aclean = True

    def names(self, node):
        r = []
        for ref in self.gitrepo.listall_references():
            if not ref.startswith(_BMS_PREFIX):
                continue
            if self.gitrepo.references[ref].peel().id.raw != node:
                continue
            r.append(pycompat.fsencode(ref[len(_BMS_PREFIX) :]))
        return r

    # Cleanup opportunity: this is *identical* to core's bookmarks store.
    def expandname(self, bname):
        if bname == b'.':
            if self.active:
                return self.active
            raise error.RepoLookupError(_(b"no active bookmark"))
        return bname

    def applychanges(self, repo, tr, changes):
        """Apply a list of changes to bookmarks"""
        # TODO: this should respect transactions, but that's going to
        # require enlarging the gitbmstore to know how to do in-memory
        # temporary writes and read those back prior to transaction
        # finalization.
        for name, node in changes:
            if node is None:
                self.gitrepo.references.delete(
                    _BMS_PREFIX + pycompat.fsdecode(name)
                )
            else:
                self.gitrepo.references.create(
                    _BMS_PREFIX + pycompat.fsdecode(name),
                    gitutil.togitnode(node),
                    force=True,
                )

    def checkconflict(self, mark, force=False, target=None):
        githead = _BMS_PREFIX + pycompat.fsdecode(mark)
        cur = self.gitrepo.references['HEAD']
        if githead in self.gitrepo.references and not force:
            if target:
                if self.gitrepo.references[githead] == target and target == cur:
                    # re-activating a bookmark
                    return []
                # moving a bookmark - forward?
                raise NotImplementedError
            raise error.Abort(
                _(b"bookmark '%s' already exists (use -f to force)") % mark
            )
        if len(mark) > 3 and not force:
            try:
                shadowhash = scmutil.isrevsymbol(self._repo, mark)
            except error.LookupError:  # ambiguous identifier
                shadowhash = False
            if shadowhash:
                self._repo.ui.warn(
                    _(
                        b"bookmark %s matches a changeset hash\n"
                        b"(did you leave a -r out of an 'hg bookmark' "
                        b"command?)\n"
                    )
                    % mark
                )
        return []


def init(orig, ui, dest=b'.', **opts):
    if opts.get('git', False):
        path = os.path.abspath(dest)
        # TODO: walk up looking for the git repo
        _setupdothg(ui, path)
        return 0
    return orig(ui, dest=dest, **opts)


def reposetup(ui, repo):
    if repo.local() and isinstance(repo.store, gitstore):
        orig = repo.__class__
        repo.store._progress_factory = repo.ui.makeprogress

        class gitlocalrepo(orig):
            def _makedirstate(self):
                # TODO narrow support here
                return dirstate.gitdirstate(
                    self.ui, self.vfs.base, self.store.git
                )

            def commit(self, *args, **kwargs):
                ret = orig.commit(self, *args, **kwargs)
                tid = self.store.git[gitutil.togitnode(ret)].tree.id
                # DANGER! This will flush any writes staged to the
                # index in Git, but we're sidestepping the index in a
                # way that confuses git when we commit. Alas.
                self.store.git.index.read_tree(tid)
                self.store.git.index.write()
                return ret

            @property
            def _bookmarks(self):
                return gitbmstore(self.store.git)

        repo.__class__ = gitlocalrepo
    return repo


def _featuresetup(ui, supported):
    # don't die on seeing a repo with the git requirement
    supported |= {b'git'}


def extsetup(ui):
    extensions.wrapfunction(localrepo, b'makestore', _makestore)
    extensions.wrapfunction(localrepo, b'makefilestorage', _makefilestorage)
    # Inject --git flag for `hg init`
    entry = extensions.wrapcommand(commands.table, b'init', init)
    entry[1].extend(
        [(b'', b'git', None, b'set up a git repository instead of hg')]
    )
    localrepo.featuresetupfuncs.add(_featuresetup)
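For orientation, the HEAD-to-active-bookmark mapping that `gitbmstore.active` relies on can be exercised directly with pygit2 (a sketch; the repository path is hypothetical):

```python
import pygit2

repo = pygit2.Repository('/path/to/some/clone/.git')  # hypothetical path
head = repo.references['HEAD']
# A symbolic HEAD has a str target like 'refs/heads/main'; the suffix
# after 'refs/heads/' is what gitbmstore reports as the active mark.
# A detached HEAD has an Oid target instead, so there is no active mark.
if isinstance(head.target, str) and head.target.startswith('refs/heads/'):
    print('active bookmark:', head.target[len('refs/heads/'):])
else:
    print('detached HEAD; no active bookmark')
```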
@@ -0,0 +1,307 @@

from __future__ import absolute_import

import contextlib
import errno
import os

from mercurial import (
    error,
    extensions,
    match as matchmod,
    node as nodemod,
    pycompat,
    scmutil,
    util,
)
from mercurial.interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

from . import gitutil

pygit2 = gitutil.get_pygit2()

def readpatternfile(orig, filepath, warn, sourceinfo=False):
    if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')):
        return orig(filepath, warn, sourceinfo=sourceinfo)
    result = []
    warnings = []
    # the open() mode must be str, not bytes
    with open(filepath, 'rb') as fp:
        for l in fp:
            l = l.strip()
            if not l or l.startswith(b'#'):
                continue
            if l.startswith(b'!'):
                warnings.append(b'unsupported ignore pattern %s' % l)
                continue
            if l.startswith(b'/'):
                result.append(b'rootglob:' + l[1:])
            else:
                result.append(b'relglob:' + l)
    return result, warnings


extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile)


_STATUS_MAP = {}
if pygit2:
    _STATUS_MAP = {
        pygit2.GIT_STATUS_CONFLICTED: b'm',
        pygit2.GIT_STATUS_CURRENT: b'n',
        pygit2.GIT_STATUS_IGNORED: b'?',
        pygit2.GIT_STATUS_INDEX_DELETED: b'r',
        pygit2.GIT_STATUS_INDEX_MODIFIED: b'n',
        pygit2.GIT_STATUS_INDEX_NEW: b'a',
        pygit2.GIT_STATUS_INDEX_RENAMED: b'a',
        pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n',
        pygit2.GIT_STATUS_WT_DELETED: b'r',
        pygit2.GIT_STATUS_WT_MODIFIED: b'n',
        pygit2.GIT_STATUS_WT_NEW: b'?',
        pygit2.GIT_STATUS_WT_RENAMED: b'a',
        pygit2.GIT_STATUS_WT_TYPECHANGE: b'n',
        pygit2.GIT_STATUS_WT_UNREADABLE: b'?',
        pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: b'm',
    }


@interfaceutil.implementer(intdirstate.idirstate)
class gitdirstate(object):
    def __init__(self, ui, root, gitrepo):
        self._ui = ui
        self._root = os.path.dirname(root)
        self.git = gitrepo
        self._plchangecallbacks = {}

    def p1(self):
        try:
            return self.git.head.peel().id.raw
        except pygit2.GitError:
            # Typically happens when peeling HEAD fails, as in an
            # empty repository.
            return nodemod.nullid

    def p2(self):
        # TODO: MERGE_HEAD? something like that, right?
        return nodemod.nullid

    def setparents(self, p1, p2=nodemod.nullid):
        assert p2 == nodemod.nullid, b'TODO merging support'
        self.git.head.set_target(gitutil.togitnode(p1))

    @util.propertycache
    def identity(self):
        return util.filestat.frompath(
            os.path.join(self._root, b'.git', b'index')
        )

    def branch(self):
        return b'default'

    def parents(self):
        # TODO how on earth do we find p2 if a merge is in flight?
        return self.p1(), nodemod.nullid

    def __iter__(self):
        return (pycompat.fsencode(f.path) for f in self.git.index)

    def items(self):
        for ie in self.git.index:
            yield ie.path, None  # value should be a dirstatetuple

    # py2,3 compat forward
    iteritems = items

    def __getitem__(self, filename):
        try:
            gs = self.git.status_file(filename)
        except KeyError:
            return b'?'
        return _STATUS_MAP[gs]

    def __contains__(self, filename):
        try:
            gs = self.git.status_file(filename)
            return _STATUS_MAP[gs] != b'?'
        except KeyError:
            return False

    def status(self, match, subrepos, ignored, clean, unknown):
        # TODO handling of clean files - can we get that from git.status()?
        modified, added, removed, deleted, unknown, ignored, clean = (
            [],
            [],
            [],
            [],
            [],
            [],
            [],
        )
        gstatus = self.git.status()
        for path, status in gstatus.items():
            path = pycompat.fsencode(path)
            if status == pygit2.GIT_STATUS_IGNORED:
                if path.endswith(b'/'):
                    continue
                ignored.append(path)
            elif status in (
                pygit2.GIT_STATUS_WT_MODIFIED,
                pygit2.GIT_STATUS_INDEX_MODIFIED,
                pygit2.GIT_STATUS_WT_MODIFIED
                | pygit2.GIT_STATUS_INDEX_MODIFIED,
            ):
                modified.append(path)
            elif status == pygit2.GIT_STATUS_INDEX_NEW:
                added.append(path)
            elif status == pygit2.GIT_STATUS_WT_NEW:
                unknown.append(path)
            elif status == pygit2.GIT_STATUS_WT_DELETED:
                deleted.append(path)
            elif status == pygit2.GIT_STATUS_INDEX_DELETED:
                removed.append(path)
            else:
                raise error.Abort(
                    b'unhandled case: status for %s is %d' % (path, status)
                )

        # TODO are we really always sure of status here?
        return (
            False,
            scmutil.status(
                modified, added, removed, deleted, unknown, ignored, clean
            ),
        )

    def flagfunc(self, buildfallback):
        # TODO we can do better
        return buildfallback()

    def getcwd(self):
        # TODO is this a good way to do this?
        return os.path.dirname(
            os.path.dirname(pycompat.fsencode(self.git.path))
        )

    def normalize(self, path):
        normed = util.normcase(path)
        assert normed == path, b"TODO handling of case folding: %s != %s" % (
            normed,
            path,
        )
        return path

    @property
    def _checklink(self):
        return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path)))

    def copies(self):
        # TODO support copies?
        return {}

    # TODO what the heck is this
    _filecache = set()

    def pendingparentchange(self):
        # TODO: we need to implement the context manager bits and
        # correctly stage/revert index edits.
        return False

    def write(self, tr):
        # TODO: call parent change callbacks

        if tr:

            def writeinner(category):
                self.git.index.write()

            tr.addpending(b'gitdirstate', writeinner)
        else:
            self.git.index.write()

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        # TODO core dirstate does something about slashes here
        assert isinstance(f, bytes)
        r = util.pathto(self._root, cwd, f)
        return r

    def matches(self, match):
        for x in self.git.index:
            p = pycompat.fsencode(x.path)
            if match(p):
                yield p

    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean."""
        # TODO: for now we just let libgit2 re-stat the file. We can
        # clearly do better.

    def normallookup(self, f):
        """Mark a file normal, but possibly dirty."""
        # TODO: for now we just let libgit2 re-stat the file. We can
        # clearly do better.

    def walk(self, match, subrepos, unknown, ignored, full=True):
        # TODO: we need to use .status() and not iterate the index,
        # because the index doesn't force a re-walk and so `hg add` of
        # a new file without an intervening call to status will
        # silently do nothing.
        r = {}
        cwd = self.getcwd()
        for path, status in self.git.status().items():
            if path.startswith('.hg/'):
                continue
            path = pycompat.fsencode(path)
            if not match(path):
                continue
            # TODO construct the stat info from the status object?
            try:
                s = os.stat(os.path.join(cwd, path))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                continue
            r[path] = s
        return r

    def savebackup(self, tr, backupname):
        # TODO: figure out a strategy for saving index backups.
        pass

    def restorebackup(self, tr, backupname):
        # TODO: figure out a strategy for saving index backups.
        pass

    def add(self, f):
        self.git.index.add(pycompat.fsdecode(f))

    def drop(self, f):
        self.git.index.remove(pycompat.fsdecode(f))

    def remove(self, f):
        self.git.index.remove(pycompat.fsdecode(f))

    def copied(self, path):
        # TODO: track copies?
        return None

    @contextlib.contextmanager
    def parentchange(self):
        # TODO: track this maybe?
        yield

    def addparentchangecallback(self, category, callback):
        # TODO: should this be added to the dirstate interface?
        self._plchangecallbacks[category] = callback

    def clearbackup(self, tr, backupname):
        # TODO
        pass

    def setbranch(self, branch):
        raise error.Abort(
            b'git repos do not support branches. try using bookmarks'
        )
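The `.gitignore` translation performed by `readpatternfile()` above maps root-anchored patterns to `rootglob:` and everything else to `relglob:`, dropping comments, blanks, and unsupported `!` negations. A self-contained sketch of that mapping:

```python
def translate_gitignore_line(l):
    # Mirrors the pattern mapping in readpatternfile() above (a sketch;
    # comment, blank, and unsupported '!' lines yield None).
    l = l.strip()
    if not l or l.startswith(b'#') or l.startswith(b'!'):
        return None
    if l.startswith(b'/'):
        return b'rootglob:' + l[1:]
    return b'relglob:' + l


assert translate_gitignore_line(b'/build') == b'rootglob:build'
assert translate_gitignore_line(b'*.pyc') == b'relglob:*.pyc'
assert translate_gitignore_line(b'!keep.me') is None
```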
@@ -0,0 +1,466 @@

from __future__ import absolute_import

from mercurial.i18n import _

from mercurial import (
    ancestor,
    changelog as hgchangelog,
    dagop,
    encoding,
    error,
    manifest,
    node as nodemod,
    pycompat,
)
from mercurial.interfaces import (
    repository,
    util as interfaceutil,
)
from mercurial.utils import stringutil
from . import (
    gitutil,
    index,
    manifest as gitmanifest,
)

pygit2 = gitutil.get_pygit2()


class baselog(object):  # revlog.revlog):
    """Common implementations between changelog and manifestlog."""

    def __init__(self, gr, db):
        self.gitrepo = gr
        self._db = db

    def __len__(self):
        return int(
            self._db.execute('SELECT COUNT(*) FROM changelog').fetchone()[0]
        )

    def rev(self, n):
        if n == nodemod.nullid:
            return -1
        t = self._db.execute(
            'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
        ).fetchone()
        if t is None:
            raise error.LookupError(n, b'00changelog.i', _(b'no node'))
        return t[0]

    def node(self, r):
        if r == nodemod.nullrev:
            return nodemod.nullid
        t = self._db.execute(
            'SELECT node FROM changelog WHERE rev = ?', (r,)
        ).fetchone()
        if t is None:
            raise error.LookupError(r, b'00changelog.i', _(b'no node'))
        return nodemod.bin(t[0])

    def hasnode(self, n):
        t = self._db.execute(
            'SELECT node FROM changelog WHERE node = ?', (n,)
        ).fetchone()
        return t is not None


class baselogindex(object):
    def __init__(self, log):
        self._log = log

    def has_node(self, n):
        return self._log.rev(n) != -1

    def __len__(self):
        return len(self._log)

    def __getitem__(self, idx):
        p1rev, p2rev = self._log.parentrevs(idx)
        # TODO: it's messy that the index leaks so far out of the
        # storage layer that we have to implement things like reading
        # this raw tuple, which exposes revlog internals.
        return (
            # Pretend offset is just the index, since we don't really care.
            idx,
            # Same with lengths
            idx,  # length
            idx,  # rawsize
            -1,  # delta base
            idx,  # linkrev TODO is this right?
            p1rev,
            p2rev,
            self._log.node(idx),
        )


# TODO: an interface for the changelog type?
class changelog(baselog):
    def __contains__(self, rev):
        try:
            self.node(rev)
            return True
        except error.LookupError:
            return False

    def __iter__(self):
        return iter(pycompat.xrange(len(self)))

    @property
    def filteredrevs(self):
        # TODO: we should probably add a refs/hg/ namespace for hidden
        # heads etc, but that's an idea for later.
        return set()

    @property
    def index(self):
        return baselogindex(self)

    @property
    def nodemap(self):
        r = {
            nodemod.bin(v[0]): v[1]
            for v in self._db.execute('SELECT node, rev FROM changelog')
        }
        r[nodemod.nullid] = nodemod.nullrev
        return r

    def tip(self):
        t = self._db.execute(
            'SELECT node FROM changelog ORDER BY rev DESC LIMIT 1'
        ).fetchone()
        if t:
            return nodemod.bin(t[0])
        return nodemod.nullid

    def revs(self, start=0, stop=None):
        if stop is None:
            stop = self.tip()
        t = self._db.execute(
            'SELECT rev FROM changelog '
            'WHERE rev >= ? AND rev <= ? '
            'ORDER BY REV ASC',
            (start, stop),
        )
        return (int(r[0]) for r in t)

    def _partialmatch(self, id):
        if nodemod.wdirhex.startswith(id):
            raise error.WdirUnsupported
        candidates = [
            nodemod.bin(x[0])
            for x in self._db.execute(
                'SELECT node FROM changelog WHERE node LIKE ?', (id + b'%',)
            )
        ]
        if nodemod.nullhex.startswith(id):
            candidates.append(nodemod.nullid)
        if len(candidates) > 1:
            raise error.AmbiguousPrefixLookupError(
                id, b'00changelog.i', _(b'ambiguous identifier')
            )
        if candidates:
            return candidates[0]
        return None

    def flags(self, rev):
        return 0

    def shortest(self, node, minlength=1):
        nodehex = nodemod.hex(node)
        for attempt in pycompat.xrange(minlength, len(nodehex) + 1):
            candidate = nodehex[:attempt]
            matches = int(
                self._db.execute(
                    'SELECT COUNT(*) FROM changelog WHERE node LIKE ?',
                    (pycompat.sysstr(candidate + b'%'),),
                ).fetchone()[0]
            )
            if matches == 1:
                return candidate
        return nodehex

    def headrevs(self, revs=None):
        realheads = [
            int(x[0])
            for x in self._db.execute(
                'SELECT rev FROM changelog '
                'INNER JOIN heads ON changelog.node = heads.node'
            )
        ]
        if revs:
            return sorted([r for r in revs if r in realheads])
        return sorted(realheads)

    def changelogrevision(self, nodeorrev):
        # Ensure we have a node id
        if isinstance(nodeorrev, int):
            n = self.node(nodeorrev)
        else:
            n = nodeorrev
        # handle looking up nullid
        if n == nodemod.nullid:
            return hgchangelog._changelogrevision(extra={})
        hn = gitutil.togitnode(n)
        # We've got a real commit!
        files = [
            r[0]
            for r in self._db.execute(
                'SELECT filename FROM changedfiles '
                'WHERE node = ? and filenode != ?',
                (hn, gitutil.nullgit),
            )
        ]
        filesremoved = [
            r[0]
            for r in self._db.execute(
                'SELECT filename FROM changedfiles '
                'WHERE node = ? and filenode = ?',
                (hn, nodemod.nullhex),
            )
        ]
        c = self.gitrepo[hn]
        return hgchangelog._changelogrevision(
            manifest=n,  # pretend manifest the same as the commit node
            user=b'%s <%s>'
            % (c.author.name.encode('utf8'), c.author.email.encode('utf8')),
            date=(c.author.time, -c.author.offset * 60),
            files=files,
            # TODO filesadded in the index
            filesremoved=filesremoved,
            description=c.message.encode('utf8'),
            # TODO do we want to handle extra? how?
            extra={b'branch': b'default'},
        )

    def ancestors(self, revs, stoprev=0, inclusive=False):
        revs = list(revs)
        tip = self.rev(self.tip())
        for r in revs:
            if r > tip:
                raise IndexError(b'Invalid rev %r' % r)
        return ancestor.lazyancestors(
            self.parentrevs, revs, stoprev=stoprev, inclusive=inclusive
        )

    # Cleanup opportunity: this is *identical* to the revlog.py version
    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def reachableroots(self, minroot, heads, roots, includepath=False):
        return dagop._reachablerootspure(
            self.parentrevs, minroot, roots, heads, includepath
        )

    # Cleanup opportunity: this is *identical* to the revlog.py version
    def isancestor(self, a, b):
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    # Cleanup opportunity: this is *identical* to the revlog.py version
    def isancestorrev(self, a, b):
        if a == nodemod.nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

    def parentrevs(self, rev):
        n = self.node(rev)
        hn = gitutil.togitnode(n)
        c = self.gitrepo[hn]
        p1 = p2 = nodemod.nullrev
        if c.parents:
            p1 = self.rev(c.parents[0].id.raw)
            if len(c.parents) > 2:
                raise error.Abort(b'TODO octopus merge handling')
            if len(c.parents) == 2:
                p2 = self.rev(c.parents[1].id.raw)
        return p1, p2

    # Private method is used at least by the tags code.
    _uncheckedparentrevs = parentrevs

    def commonancestorsheads(self, a, b):
        # TODO the revlog version of this has a C path, so we probably
        # need to optimize this...
        a, b = self.rev(a), self.rev(b)
        return [
            self.node(n)
            for n in ancestor.commonancestorsheads(self.parentrevs, a, b)
        ]

    def branchinfo(self, rev):
        """Git doesn't do named branches, so just put everything on default."""
        return b'default', False

    def delayupdate(self, tr):
        # TODO: I think we can elide this because we're just dropping
        # an object in the git repo?
        pass

    def add(
        self,
        manifest,
        files,
        desc,
        transaction,
        p1,
        p2,
        user,
        date=None,
        extra=None,
        p1copies=None,
        p2copies=None,
        filesadded=None,
        filesremoved=None,
    ):
        parents = []
        hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
        if p1 != nodemod.nullid:
            parents.append(hp1)
        if p2 and p2 != nodemod.nullid:
            parents.append(hp2)
        assert date is not None
        timestamp, tz = date
        sig = pygit2.Signature(
            encoding.unifromlocal(stringutil.person(user)),
            encoding.unifromlocal(stringutil.email(user)),
            timestamp,
            -(tz // 60),
        )
        oid = self.gitrepo.create_commit(
            None, sig, sig, desc, gitutil.togitnode(manifest), parents
        )
        # Set up an internal reference to force the commit into the
        # changelog. Hypothetically, we could even use this refs/hg/
        # namespace to allow for anonymous heads on git repos, which
        # would be neat.
        self.gitrepo.references.create(
            'refs/hg/internal/latest-commit', oid, force=True
        )
        # Reindex now to pick up changes. We omit the progress
        # callback because this will be very quick.
        index._index_repo(self.gitrepo, self._db)
        return oid.raw


class manifestlog(baselog):
    def __getitem__(self, node):
        return self.get(b'', node)

    def get(self, relpath, node):
        if node == nodemod.nullid:
            # TODO: this should almost certainly be a memgittreemanifestctx
            return manifest.memtreemanifestctx(self, relpath)
        commit = self.gitrepo[gitutil.togitnode(node)]
        t = commit.tree
        if relpath:
            parts = relpath.split(b'/')
            for p in parts:
                te = t[p]
                t = self.gitrepo[te.id]
        return gitmanifest.gittreemanifestctx(self.gitrepo, t)


@interfaceutil.implementer(repository.ifilestorage)
class filelog(baselog):
    def __init__(self, gr, db, path):
        super(filelog, self).__init__(gr, db)
        assert isinstance(path, bytes)
        self.path = path

    def read(self, node):
        if node == nodemod.nullid:
            return b''
        return self.gitrepo[gitutil.togitnode(node)].data

    def lookup(self, node):
        if len(node) not in (20, 40):
            node = int(node)
        if isinstance(node, int):
            assert False, b'todo revnums for nodes'
        if len(node) == 40:
            node = nodemod.bin(node)
        hnode = gitutil.togitnode(node)
        if hnode in self.gitrepo:
            return node
        raise error.LookupError(self.path, node, _(b'no match found'))

    def cmp(self, node, text):
        """Returns True if text is different than content at `node`."""
        return self.read(node) != text

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        assert not meta  # Should we even try to handle this?
        return self.gitrepo.create_blob(text).raw

    def __iter__(self):
        for clrev in self._db.execute(
            '''
            SELECT rev FROM changelog
            INNER JOIN changedfiles ON changelog.node = changedfiles.node
            WHERE changedfiles.filename = ? AND changedfiles.filenode != ?
            ''',
            (pycompat.fsdecode(self.path), gitutil.nullgit),
        ):
            yield clrev[0]

    def linkrev(self, fr):
        return fr

    def rev(self, node):
        row = self._db.execute(
            '''
            SELECT rev FROM changelog
            INNER JOIN changedfiles ON changelog.node = changedfiles.node
            WHERE changedfiles.filename = ? AND changedfiles.filenode = ?''',
            (pycompat.fsdecode(self.path), gitutil.togitnode(node)),
        ).fetchone()
        if row is None:
            raise error.LookupError(self.path, node, _(b'no such node'))
        return int(row[0])

    def node(self, rev):
        maybe = self._db.execute(
            '''SELECT filenode FROM changedfiles
            INNER JOIN changelog ON changelog.node = changedfiles.node
            WHERE changelog.rev = ? AND filename = ?
            ''',
            (rev, pycompat.fsdecode(self.path)),
        ).fetchone()
        if maybe is None:
            raise IndexError('gitlog %r out of range %d' % (self.path, rev))
        return nodemod.bin(maybe[0])

    def parents(self, node):
        gn = gitutil.togitnode(node)
        gp = pycompat.fsdecode(self.path)
        ps = []
        for p in self._db.execute(
            '''SELECT p1filenode, p2filenode FROM changedfiles
            WHERE filenode = ? AND filename = ?
            ''',
            (gn, gp),
        ).fetchone():
            if p is None:
                commit = self._db.execute(
                    "SELECT node FROM changedfiles "
                    "WHERE filenode = ? AND filename = ?",
                    (gn, gp),
                ).fetchone()[0]
                # This filelog is missing some data. Build the
                # filelog, then recurse (which will always find data).
                if pycompat.ispy3:
                    commit = commit.decode('ascii')
                index.fill_in_filelog(self.gitrepo, self._db, commit, gp, gn)
                return self.parents(node)
            else:
                ps.append(nodemod.bin(p))
        return ps

    def renamed(self, node):
        # TODO: renames/copies
        return False
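The node-prefix lookups in `changelog` above (`_partialmatch`, `shortest`) boil down to sqlite `LIKE` queries over hexlified nodes. A self-contained illustration with hypothetical node values:

```python
import sqlite3

db = sqlite3.connect(':memory:')
db.execute(
    'CREATE TABLE changelog (rev INTEGER NOT NULL PRIMARY KEY, node TEXT NOT NULL)'
)
db.executemany(
    'INSERT INTO changelog (rev, node) VALUES (?, ?)',
    [(0, 'abc1' + '0' * 36), (1, 'abd2' + '0' * 36)],  # hypothetical nodes
)
# The same LIKE-based prefix counting used by shortest() above:
count = db.execute(
    'SELECT COUNT(*) FROM changelog WHERE node LIKE ?', ('ab%',)
).fetchone()[0]
print(count)  # 2 -> prefix 'ab' is ambiguous; 'abc' would be unique
```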
@@ -0,0 +1,40 @@

"""utilities to assist in working with pygit2"""
from __future__ import absolute_import

from mercurial.node import bin, hex, nullid

from mercurial import pycompat

pygit2_module = None


def get_pygit2():
    global pygit2_module
    if pygit2_module is None:
        try:
            import pygit2 as pygit2_module

            pygit2_module.InvalidSpecError
        except (ImportError, AttributeError):
            pass
    return pygit2_module


def togitnode(n):
    """Wrapper to convert a Mercurial binary node to a unicode hexlified node.

    pygit2 and sqlite both need nodes as strings, not bytes.
    """
    assert len(n) == 20
    return pycompat.sysstr(hex(n))


def fromgitnode(n):
    """Opposite of togitnode."""
    assert len(n) == 40
    if pycompat.ispy3:
        return bin(n.encode('ascii'))
    return bin(n)


nullgit = togitnode(nullid)
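A quick round-trip check of the helpers above (a sketch; it assumes the module is importable as `hgext.git.gitutil`, which is where this extension lives in the hg tree):

```python
from mercurial.node import nullid
from hgext.git import gitutil

# togitnode yields a native str of 40 hex digits, not bytes.
assert gitutil.togitnode(nullid) == '0' * 40
assert gitutil.fromgitnode(gitutil.togitnode(nullid)) == nullid
assert gitutil.nullgit == '0' * 40
```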
@@ -0,0 +1,350 b'' | |||
|
1 | from __future__ import absolute_import | |
|
2 | ||
|
3 | import collections | |
|
4 | import os | |
|
5 | import sqlite3 | |
|
6 | ||
|
7 | from mercurial.i18n import _ | |
|
8 | ||
|
9 | from mercurial import ( | |
|
10 | encoding, | |
|
11 | error, | |
|
12 | node as nodemod, | |
|
13 | pycompat, | |
|
14 | ) | |
|
15 | ||
|
16 | from . import gitutil | |
|
17 | ||
|
18 | ||
|
19 | pygit2 = gitutil.get_pygit2() | |
|
20 | ||
|
21 | _CURRENT_SCHEMA_VERSION = 1 | |
|
22 | _SCHEMA = ( | |
|
23 | """ | |
|
24 | CREATE TABLE refs ( | |
|
25 | -- node and name are unique together. There may be more than one name for | |
|
26 | -- a given node, and there may be no name at all for a given node (in the | |
|
27 | -- case of an anonymous hg head). | |
|
28 | node TEXT NOT NULL, | |
|
29 | name TEXT | |
|
30 | ); | |
|
31 | ||
|
32 | -- The "possible heads" of the repository, which we use to figure out | |
|
33 | -- if we need to re-walk the changelog. | |
|
34 | CREATE TABLE possible_heads ( | |
|
35 | node TEXT NOT NULL | |
|
36 | ); | |
|
37 | ||
|
38 | -- The topological heads of the changelog, which hg depends on. | |
|
39 | CREATE TABLE heads ( | |
|
40 | node TEXT NOT NULL | |
|
41 | ); | |
|
42 | ||
|
43 | -- A total ordering of the changelog | |
|
44 | CREATE TABLE changelog ( | |
|
45 | rev INTEGER NOT NULL PRIMARY KEY, | |
|
46 | node TEXT NOT NULL, | |
|
47 | p1 TEXT, | |
|
48 | p2 TEXT | |
|
49 | ); | |
|
50 | ||
|
51 | CREATE UNIQUE INDEX changelog_node_idx ON changelog(node); | |
|
52 | CREATE UNIQUE INDEX changelog_node_rev_idx ON changelog(rev, node); | |
|
53 | ||
|
54 | -- Changed files for each commit, which lets us dynamically build | |
|
55 | -- filelogs. | |
|
56 | CREATE TABLE changedfiles ( | |
|
57 | node TEXT NOT NULL, | |
|
58 | filename TEXT NOT NULL, | |
|
59 | -- 40 zeroes for deletions | |
|
60 | filenode TEXT NOT NULL, | |
|
61 | -- to handle filelog parentage: | |
|
62 | p1node TEXT, | |
|
63 | p1filenode TEXT, | |
|
64 | p2node TEXT, | |
|
65 | p2filenode TEXT | |
|
66 | ); | |
|
67 | ||
|
68 | CREATE INDEX changedfiles_nodes_idx | |
|
69 | ON changedfiles(node); | |
|
70 | ||
|
71 | PRAGMA user_version=%d | |
|
72 | """ | |
|
73 | % _CURRENT_SCHEMA_VERSION | |
|
74 | ) | |
|
75 | ||
|
76 | ||
|
77 | def _createdb(path): | |
|
78 | # print('open db', path) | |
|
79 | # import traceback | |
|
80 | # traceback.print_stack() | |
|
81 | db = sqlite3.connect(encoding.strfromlocal(path)) | |
|
82 | db.text_factory = bytes | |
|
83 | ||
|
84 | res = db.execute('PRAGMA user_version').fetchone()[0] | |
|
85 | ||
|
86 | # New database. | |
|
87 | if res == 0: | |
|
88 | for statement in _SCHEMA.split(';'): | |
|
89 | db.execute(statement.strip()) | |
|
90 | ||
|
91 | db.commit() | |
|
92 | ||
|
93 | elif res == _CURRENT_SCHEMA_VERSION: | |
|
94 | pass | |
|
95 | ||
|
96 | else: | |
|
97 | raise error.Abort(_(b'sqlite database has unrecognized version')) | |
|
98 | ||
|
99 | db.execute('PRAGMA journal_mode=WAL') | |
|
100 | ||
|
101 | return db | |
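_createdb() keys its schema handling off SQLite's user_version pragma: 0 marks a fresh database that gets the schema applied, the current version is reused as-is, and anything else aborts. A self-contained sketch of that dispatch (plain sqlite3; names and table are hypothetical):

    import sqlite3

    _CURRENT_SCHEMA_VERSION = 1

    def open_cache(path):
        db = sqlite3.connect(path)
        version = db.execute('PRAGMA user_version').fetchone()[0]
        if version == 0:
            # Fresh file: create the schema, then stamp the version.
            db.execute('CREATE TABLE example (node TEXT NOT NULL)')
            db.execute('PRAGMA user_version=%d' % _CURRENT_SCHEMA_VERSION)
            db.commit()
        elif version != _CURRENT_SCHEMA_VERSION:
            raise RuntimeError('unrecognized schema version %d' % version)
        return db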
|
102 | ||
|
103 | ||
|
104 | _OUR_ORDER = () | |
|
105 | if pygit2: | |
|
106 | _OUR_ORDER = ( | |
|
107 | pygit2.GIT_SORT_TOPOLOGICAL | |
|
108 | | pygit2.GIT_SORT_TIME | |
|
109 | | pygit2.GIT_SORT_REVERSE | |
|
110 | ) | |
|
111 | ||
|
112 | _DIFF_FLAGS = 1 << 21 # GIT_DIFF_FORCE_BINARY, which isn't exposed by pygit2 | |
|
113 | ||
|
114 | ||
|
115 | def _find_nearest_ancestor_introducing_node( | |
|
116 | db, gitrepo, file_path, walk_start, filenode | |
|
117 | ): | |
|
118 | """Find the nearest ancestor that introduces a file node. | |
|
119 | ||
|
120 | Args: | |
|
121 | db: a handle to our sqlite database. | |
|
122 | gitrepo: A pygit2.Repository instance. | |
|
123 | file_path: the path of a file in the repo | |
|
124 | walk_start: a pygit2.Oid that is a commit where we should start walking | |
|
125 | for our nearest ancestor. | |
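filenode: the hexlified file node we are looking for.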
|
126 | ||
|
127 | Returns: | |
|
128 | A hexlified SHA that is the commit ID of the nearest ancestor introducing filenode. | |
|
129 | """ | |
|
130 | assert isinstance(file_path, str), 'file_path must be str, got %r' % type( | |
|
131 | file_path | |
|
132 | ) | |
|
133 | assert isinstance(filenode, str), 'filenode must be str, got %r' % type( | |
|
134 | filenode | |
|
135 | ) | |
|
136 | parent_options = { | |
|
137 | row[0].decode('ascii') | |
|
138 | for row in db.execute( | |
|
139 | 'SELECT node FROM changedfiles ' | |
|
140 | 'WHERE filename = ? AND filenode = ?', | |
|
141 | (file_path, filenode), | |
|
142 | ) | |
|
143 | } | |
|
144 | inner_walker = gitrepo.walk(walk_start, _OUR_ORDER) | |
|
145 | for w in inner_walker: | |
|
146 | if w.id.hex in parent_options: | |
|
147 | return w.id.hex | |
|
148 | raise error.ProgrammingError( | |
|
149 | 'Unable to find introducing commit for %s node %s from %s' | |
|
150 | % (file_path, filenode, walk_start), | |
|
151 | ) | |
|
152 | ||
|
153 | ||
|
154 | def fill_in_filelog(gitrepo, db, startcommit, path, startfilenode): | |
|
155 | """Given a starting commit and path, fill in a filelog's parent pointers. | |
|
156 | ||
|
157 | Args: | |
|
158 | gitrepo: a pygit2.Repository | |
|
159 | db: a handle to our sqlite database | |
|
160 | startcommit: a hexlified node id for the commit to start at | |
|
161 | path: the path of the file whose parent pointers we should fill in. | |
|
162 | startfilenode: the hexlified node id of the file at startcommit | |
|
163 | ||
|
164 | TODO: make startfilenode optional | |
|
165 | """ | |
|
166 | assert isinstance( | |
|
167 | startcommit, str | |
|
168 | ), 'startcommit must be str, got %r' % type(startcommit) | |
|
169 | assert isinstance( | |
|
170 | startfilenode, str | |
|
171 | ), 'startfilenode must be str, got %r' % type(startfilenode) | |
|
172 | visit = collections.deque([(startcommit, startfilenode)]) | |
|
173 | while visit: | |
|
174 | cnode, filenode = visit.popleft() | |
|
175 | commit = gitrepo[cnode] | |
|
176 | parents = [] | |
|
177 | for parent in commit.parents: | |
|
178 | t = parent.tree | |
|
179 | for comp in path.split('/'): | |
|
180 | try: | |
|
181 | t = gitrepo[t[comp].id] | |
|
182 | except KeyError: | |
|
183 | break | |
|
184 | else: | |
|
185 | introducer = _find_nearest_ancestor_introducing_node( | |
|
186 | db, gitrepo, path, parent.id, t.id.hex | |
|
187 | ) | |
|
188 | parents.append((introducer, t.id.hex)) | |
|
189 | p1node = p1fnode = p2node = p2fnode = gitutil.nullgit | |
|
190 | for par, parfnode in parents: | |
|
191 | found = int( | |
|
192 | db.execute( | |
|
193 | 'SELECT COUNT(*) FROM changedfiles WHERE ' | |
|
194 | 'node = ? AND filename = ? AND filenode = ? AND ' | |
|
195 | 'p1node NOT NULL', | |
|
196 | (par, path, parfnode), | |
|
197 | ).fetchone()[0] | |
|
198 | ) | |
|
199 | if found == 0: | |
|
200 | assert par is not None | |
|
201 | visit.append((par, parfnode)) | |
|
202 | if parents: | |
|
203 | p1node, p1fnode = parents[0] | |
|
204 | if len(parents) == 2: | |
|
205 | p2node, p2fnode = parents[1] | |
|
206 | if len(parents) > 2: | |
|
207 | raise error.ProgrammingError( | |
|
208 | b"git support can't handle octopus merges" | |
|
209 | ) | |
|
210 | db.execute( | |
|
211 | 'UPDATE changedfiles SET ' | |
|
212 | 'p1node = ?, p1filenode = ?, p2node = ?, p2filenode = ? ' | |
|
213 | 'WHERE node = ? AND filename = ? AND filenode = ?', | |
|
214 | (p1node, p1fnode, p2node, p2fnode, commit.id.hex, path, filenode), | |
|
215 | ) | |
|
216 | db.commit() | |
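The for/else over path components in fill_in_filelog() is a containment test: the else arm runs only when every component of the path resolved inside the parent's tree. The same idiom over plain nested dicts (a toy stand-in for pygit2 trees):

    def tree_lookup(tree, path):
        node = tree
        for comp in path.split('/'):
            if not isinstance(node, dict) or comp not in node:
                break  # a component is missing: path not in this tree
            node = node[comp]
        else:
            return node  # loop finished without break: path resolved
        return None

    tree = {'mercurial': {'utils': {'dateutil.py': 'blob-id'}}}
    assert tree_lookup(tree, 'mercurial/utils/dateutil.py') == 'blob-id'
    assert tree_lookup(tree, 'mercurial/missing.py') is None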
|
217 | ||
|
218 | ||
|
219 | def _index_repo(gitrepo, db, progress_factory=lambda *args, **kwargs: None): | |
|
220 | # Identify all references so we can tell the walker to visit all of them. | |
|
221 | all_refs = gitrepo.listall_references() | |
|
222 | possible_heads = set() | |
|
223 | prog = progress_factory(b'refs') | |
|
224 | for pos, ref in enumerate(all_refs): | |
|
225 | if prog is not None: | |
|
226 | prog.update(pos) | |
|
227 | if not ( | |
|
228 | ref.startswith('refs/heads/') # local branch | |
|
229 | or ref.startswith('refs/tags/') # tag | |
|
230 | or ref.startswith('refs/remotes/') # remote branch | |
|
231 | or ref.startswith('refs/hg/') # from this extension | |
|
232 | ): | |
|
233 | continue | |
|
234 | try: | |
|
235 | start = gitrepo.lookup_reference(ref).peel(pygit2.GIT_OBJ_COMMIT) | |
|
236 | except ValueError: | |
|
237 | # No commit to be found, so we don't care for hg's purposes. | |
|
238 | continue | |
|
239 | possible_heads.add(start.id) | |
|
240 | # Optimization: if the list of heads hasn't changed, don't | |
|
241 | # reindex the changelog. This doesn't matter on small | |
|
242 | # repositories, but on even moderately deep histories (e.g. cpython) | |
|
243 | # this is a very important performance win. | |
|
244 | # | |
|
245 | # TODO: we should figure out how to incrementally index history | |
|
246 | # (preferably by detecting rewinds!) so that we don't have to do a | |
|
247 | # full changelog walk every time a new commit is created. | |
|
248 | cache_heads = {x[0] for x in db.execute('SELECT node FROM possible_heads')} | |
|
249 | walker = None | |
|
250 | cur_cache_heads = {h.hex for h in possible_heads} | |
|
251 | if cur_cache_heads == cache_heads: | |
|
252 | return | |
|
253 | for start in possible_heads: | |
|
254 | if walker is None: | |
|
255 | walker = gitrepo.walk(start, _OUR_ORDER) | |
|
256 | else: | |
|
257 | walker.push(start) | |
|
258 | ||
|
259 | # Empty out the existing changelog. Even for large-ish histories | |
|
260 | # we can do the top-level "walk all the commits" dance very | |
|
261 | # quickly as long as we don't need to figure out the changed files | |
|
262 | # list. | |
|
263 | db.execute('DELETE FROM changelog') | |
|
264 | if prog is not None: | |
|
265 | prog.complete() | |
|
266 | prog = progress_factory(b'commits') | |
|
267 | # This walker is sure to visit all the revisions in history, but | |
|
268 | # only once. | |
|
269 | for pos, commit in enumerate(walker): | |
|
270 | if prog is not None: | |
|
271 | prog.update(pos) | |
|
272 | p1 = p2 = nodemod.nullhex | |
|
273 | if len(commit.parents) > 2: | |
|
274 | raise error.ProgrammingError( | |
|
275 | ( | |
|
276 | b"git support can't handle octopus merges, " | |
|
277 | b"found a commit with %d parents :(" | |
|
278 | ) | |
|
279 | % len(commit.parents) | |
|
280 | ) | |
|
281 | if commit.parents: | |
|
282 | p1 = commit.parents[0].id.hex | |
|
283 | if len(commit.parents) == 2: | |
|
284 | p2 = commit.parents[1].id.hex | |
|
285 | db.execute( | |
|
286 | 'INSERT INTO changelog (rev, node, p1, p2) VALUES(?, ?, ?, ?)', | |
|
287 | (pos, commit.id.hex, p1, p2), | |
|
288 | ) | |
|
289 | ||
|
290 | num_changedfiles = db.execute( | |
|
291 | "SELECT COUNT(*) from changedfiles WHERE node = ?", | |
|
292 | (commit.id.hex,), | |
|
293 | ).fetchone()[0] | |
|
294 | if not num_changedfiles: | |
|
295 | files = {} | |
|
296 | # I *think* we only need to check p1 for changed files | |
|
297 | # (and therefore linkrevs), because any node that would | |
|
298 | # actually have this commit as a linkrev would be | |
|
299 | # completely new in this rev. | |
|
300 | p1 = commit.parents[0].id.hex if commit.parents else None | |
|
301 | if p1 is not None: | |
|
302 | patchgen = gitrepo.diff(p1, commit.id.hex, flags=_DIFF_FLAGS) | |
|
303 | else: | |
|
304 | patchgen = commit.tree.diff_to_tree( | |
|
305 | swap=True, flags=_DIFF_FLAGS | |
|
306 | ) | |
|
307 | new_files = (p.delta.new_file for p in patchgen) | |
|
308 | files = { | |
|
309 | nf.path: nf.id.hex | |
|
310 | for nf in new_files | |
|
311 | if nf.id.raw != nodemod.nullid | |
|
312 | } | |
|
313 | for p, n in files.items(): | |
|
314 | # We intentionally set NULLs for any file parentage | |
|
315 | # information so it'll get demand-computed later. We | |
|
316 | # used to do it right here, and it was _very_ slow. | |
|
317 | db.execute( | |
|
318 | 'INSERT INTO changedfiles (' | |
|
319 | 'node, filename, filenode, p1node, p1filenode, p2node, ' | |
|
320 | 'p2filenode) VALUES(?, ?, ?, ?, ?, ?, ?)', | |
|
321 | (commit.id.hex, p, n, None, None, None, None), | |
|
322 | ) | |
|
323 | db.execute('DELETE FROM heads') | |
|
324 | db.execute('DELETE FROM possible_heads') | |
|
325 | for hid in possible_heads: | |
|
326 | h = hid.hex | |
|
327 | db.execute('INSERT INTO possible_heads (node) VALUES(?)', (h,)) | |
|
328 | haschild = db.execute( | |
|
329 | 'SELECT COUNT(*) FROM changelog WHERE p1 = ? OR p2 = ?', (h, h) | |
|
330 | ).fetchone()[0] | |
|
331 | if not haschild: | |
|
332 | db.execute('INSERT INTO heads (node) VALUES(?)', (h,)) | |
|
333 | ||
|
334 | db.commit() | |
|
335 | if prog is not None: | |
|
336 | prog.complete() | |
|
337 | ||
|
338 | ||
|
339 | def get_index(gitrepo, progress_factory=lambda *args, **kwargs: None): | |
|
340 | cachepath = os.path.join( | |
|
341 | pycompat.fsencode(gitrepo.path), b'..', b'.hg', b'cache' | |
|
342 | ) | |
|
343 | if not os.path.exists(cachepath): | |
|
344 | os.makedirs(cachepath) | |
|
345 | dbpath = os.path.join(cachepath, b'git-commits.sqlite') | |
|
346 | db = _createdb(dbpath) | |
|
347 | # TODO check against gitrepo heads before doing a full index | |
|
348 | # TODO thread a ui.progress call into this layer | |
|
349 | _index_repo(gitrepo, db, progress_factory) | |
|
350 | return db |
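A hypothetical caller of get_index(), assuming pygit2 is installed and /path/to/clone is a repository managed by this extension (so the .hg/cache directory sits next to the git metadata):

    import pygit2

    gitrepo = pygit2.Repository('/path/to/clone')

    # The first call walks all commits and fills git-commits.sqlite;
    # later calls return quickly while possible_heads is unchanged.
    db = get_index(gitrepo)

    tip = db.execute(
        'SELECT rev, node FROM changelog ORDER BY rev DESC LIMIT 1'
    ).fetchone()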
@@ -0,0 +1,297 b'' | |||
|
1 | from __future__ import absolute_import | |
|
2 | ||
|
3 | from mercurial import ( | |
|
4 | match as matchmod, | |
|
5 | pathutil, | |
|
6 | pycompat, | |
|
7 | util, | |
|
8 | ) | |
|
9 | from mercurial.interfaces import ( | |
|
10 | repository, | |
|
11 | util as interfaceutil, | |
|
12 | ) | |
|
13 | from . import gitutil | |
|
14 | ||
|
15 | ||
|
16 | pygit2 = gitutil.get_pygit2() | |
|
17 | ||
|
18 | ||
|
19 | @interfaceutil.implementer(repository.imanifestdict) | |
|
20 | class gittreemanifest(object): | |
|
21 | """Expose git trees (and optionally a builder's overlay) as a manifestdict. | |
|
22 | ||
|
23 | Very similar to mercurial.manifest.treemanifest. | |
|
24 | """ | |
|
25 | ||
|
26 | def __init__(self, git_repo, root_tree, pending_changes): | |
|
27 | """Initializer. | |
|
28 | ||
|
29 | Args: | |
|
30 | git_repo: The git_repo we're walking (required to look up child | |
|
31 | trees). | |
|
32 | root_tree: The root Git tree object for this manifest. | |
|
33 | pending_changes: A dict in which pending changes will be | |
|
34 | tracked. The enclosing memgittreemanifestctx will use this to | |
|
35 | construct any required Tree objects in Git during its | |
|
36 | `write()` method. | |
|
37 | """ | |
|
38 | self._git_repo = git_repo | |
|
39 | self._tree = root_tree | |
|
40 | if pending_changes is None: | |
|
41 | pending_changes = {} | |
|
42 | # dict of path: Optional[Tuple(node, flags)] | |
|
43 | self._pending_changes = pending_changes | |
|
44 | ||
|
45 | def _resolve_entry(self, path): | |
|
46 | """Given a path, load its node and flags, or raise KeyError if missing. | |
|
47 | ||
|
48 | This takes into account any pending writes in the builder. | |
|
49 | """ | |
|
50 | upath = pycompat.fsdecode(path) | |
|
51 | ent = None | |
|
52 | if path in self._pending_changes: | |
|
53 | val = self._pending_changes[path] | |
|
54 | if val is None: | |
|
55 | raise KeyError | |
|
56 | return val | |
|
57 | t = self._tree | |
|
58 | comps = upath.split('/') | |
|
59 | for comp in comps[:-1]: | |
|
60 | te = self._tree[comp] | |
|
61 | t = self._git_repo[te.id] | |
|
62 | ent = t[comps[-1]] | |
|
63 | if ent.filemode == pygit2.GIT_FILEMODE_BLOB: | |
|
64 | flags = b'' | |
|
65 | elif ent.filemode == pygit2.GIT_FILEMODE_BLOB_EXECUTABLE: | |
|
66 | flags = b'x' | |
|
67 | elif ent.filemode == pygit2.GIT_FILEMODE_LINK: | |
|
68 | flags = b'l' | |
|
69 | else: | |
|
70 | raise ValueError('unsupported mode %s' % oct(ent.filemode)) | |
|
71 | return ent.id.raw, flags | |
|
72 | ||
|
73 | def __getitem__(self, path): | |
|
74 | return self._resolve_entry(path)[0] | |
|
75 | ||
|
76 | def find(self, path): | |
|
77 | return self._resolve_entry(path) | |
|
78 | ||
|
79 | def __len__(self): | |
|
80 | return len(list(self.walk(matchmod.always()))) | |
|
81 | ||
|
82 | def __nonzero__(self): | |
|
83 | try: | |
|
84 | next(iter(self)) | |
|
85 | return True | |
|
86 | except StopIteration: | |
|
87 | return False | |
|
88 | ||
|
89 | __bool__ = __nonzero__ | |
|
90 | ||
|
91 | def __contains__(self, path): | |
|
92 | try: | |
|
93 | self._resolve_entry(path) | |
|
94 | return True | |
|
95 | except KeyError: | |
|
96 | return False | |
|
97 | ||
|
98 | def iterkeys(self): | |
|
99 | return self.walk(matchmod.always()) | |
|
100 | ||
|
101 | def keys(self): | |
|
102 | return list(self.iterkeys()) | |
|
103 | ||
|
104 | def __iter__(self): | |
|
105 | return self.iterkeys() | |
|
106 | ||
|
107 | def __setitem__(self, path, node): | |
|
108 | self._pending_changes[path] = node, self.flags(path) | |
|
109 | ||
|
110 | def __delitem__(self, path): | |
|
111 | # TODO: should probably raise KeyError for already-deleted files? | |
|
112 | self._pending_changes[path] = None | |
|
113 | ||
|
114 | def filesnotin(self, other, match=None): | |
|
115 | if match is not None: | |
|
116 | match = matchmod.badmatch(match, lambda path, msg: None) | |
|
117 | sm2 = set(other.walk(match)) | |
|
118 | return {f for f in self.walk(match) if f not in sm2} | |
|
119 | return {f for f in self if f not in other} | |
|
120 | ||
|
121 | @util.propertycache | |
|
122 | def _dirs(self): | |
|
123 | return pathutil.dirs(self) | |
|
124 | ||
|
125 | def hasdir(self, dir): | |
|
126 | return dir in self._dirs | |
|
127 | ||
|
128 | def diff(self, other, match=None, clean=False): | |
|
129 | # TODO | |
|
130 | assert False | |
|
131 | ||
|
132 | def setflag(self, path, flag): | |
|
133 | node, unused_flag = self._resolve_entry(path) | |
|
134 | self._pending_changes[path] = node, flag | |
|
135 | ||
|
136 | def get(self, path, default=None): | |
|
137 | try: | |
|
138 | return self._resolve_entry(path)[0] | |
|
139 | except KeyError: | |
|
140 | return default | |
|
141 | ||
|
142 | def flags(self, path): | |
|
143 | try: | |
|
144 | return self._resolve_entry(path)[1] | |
|
145 | except KeyError: | |
|
146 | return b'' | |
|
147 | ||
|
148 | def copy(self): | |
|
149 | pass | |
|
150 | ||
|
151 | def items(self): | |
|
152 | for f in self: | |
|
153 | # TODO: build a proper iterator version of this | |
|
154 | yield f, self[f] | |
|
155 | ||
|
156 | def iteritems(self): | |
|
157 | return self.items() | |
|
158 | ||
|
159 | def iterentries(self): | |
|
160 | for f in self: | |
|
161 | # TODO: build a proper iterator version of this | |
|
162 | yield self._resolve_entry(f) | |
|
163 | ||
|
164 | def text(self): | |
|
165 | assert False # TODO can this method move out of the manifest iface? | |
|
166 | ||
|
167 | def _walkonetree(self, tree, match, subdir): | |
|
168 | for te in tree: | |
|
169 | # TODO: can we prune dir walks with the matcher? | |
|
170 | realname = subdir + pycompat.fsencode(te.name) | |
|
171 | if te.type == r'tree': | |
|
172 | for inner in self._walkonetree( | |
|
173 | self._git_repo[te.id], match, realname + b'/' | |
|
174 | ): | |
|
175 | yield inner | |
|
176 | if not match(realname): | |
|
177 | continue | |
|
178 | yield pycompat.fsencode(realname) | |
|
179 | ||
|
180 | def walk(self, match): | |
|
181 | # TODO: this is a very lazy way to merge in the pending | |
|
182 | # changes. There is absolutely room for optimization here by | |
|
183 | # being clever about walking over the sets... | |
|
184 | baseline = set(self._walkonetree(self._tree, match, b'')) | |
|
185 | deleted = {p for p, v in self._pending_changes.items() if v is None} | |
|
186 | pend = {p for p in self._pending_changes if match(p)} | |
|
187 | return iter(sorted((baseline | pend) - deleted)) | |
|
188 | ||
|
189 | ||
|
190 | @interfaceutil.implementer(repository.imanifestrevisionstored) | |
|
191 | class gittreemanifestctx(object): | |
|
192 | def __init__(self, repo, gittree): | |
|
193 | self._repo = repo | |
|
194 | self._tree = gittree | |
|
195 | ||
|
196 | def read(self): | |
|
197 | return gittreemanifest(self._repo, self._tree, None) | |
|
198 | ||
|
199 | def readfast(self, shallow=False): | |
|
200 | return self.read() | |
|
201 | ||
|
202 | def copy(self): | |
|
203 | # NB: it's important that we return a memgittreemanifestctx | |
|
204 | # because the caller expects a mutable manifest. | |
|
205 | return memgittreemanifestctx(self._repo, self._tree) | |
|
206 | ||
|
207 | def find(self, path): | |
|
208 | return self.read()[path] | |
|
209 | ||
|
210 | ||
|
211 | @interfaceutil.implementer(repository.imanifestrevisionwritable) | |
|
212 | class memgittreemanifestctx(object): | |
|
213 | def __init__(self, repo, tree): | |
|
214 | self._repo = repo | |
|
215 | self._tree = tree | |
|
216 | # dict of path: Optional[Tuple(node, flags)] | |
|
217 | self._pending_changes = {} | |
|
218 | ||
|
219 | def read(self): | |
|
220 | return gittreemanifest(self._repo, self._tree, self._pending_changes) | |
|
221 | ||
|
222 | def copy(self): | |
|
223 | # TODO: if we have a builder in play, what should happen here? | |
|
224 | # Maybe we can shuffle copy() into the immutable interface. | |
|
225 | return memgittreemanifestctx(self._repo, self._tree) | |
|
226 | ||
|
227 | def write(self, transaction, link, p1, p2, added, removed, match=None): | |
|
228 | # We're not (for now, anyway) going to audit filenames, so we | |
|
229 | # can ignore added and removed. | |
|
230 | ||
|
231 | # TODO what does this match argument get used for? hopefully | |
|
232 | # just narrow? | |
|
233 | assert not match or isinstance(match, matchmod.alwaysmatcher) | |
|
234 | ||
|
235 | touched_dirs = pathutil.dirs(list(self._pending_changes)) | |
|
236 | trees = { | |
|
237 | b'': self._tree, | |
|
238 | } | |
|
239 | # path: treebuilder | |
|
240 | builders = { | |
|
241 | b'': self._repo.TreeBuilder(self._tree), | |
|
242 | } | |
|
243 | # get a TreeBuilder for every tree in the touched_dirs set | |
|
244 | for d in sorted(touched_dirs, key=lambda x: (len(x), x)): | |
|
245 | if d == b'': | |
|
246 | # loaded root tree above | |
|
247 | continue | |
|
248 | comps = d.split(b'/') | |
|
249 | full = b'' | |
|
250 | for part in comps: | |
|
251 | parent = trees[full] | |
|
252 | try: | |
|
253 | new = self._repo[parent[pycompat.fsdecode(part)]] | |
|
254 | except KeyError: | |
|
255 | # new directory | |
|
256 | new = None | |
|
257 | full += b'/' + part | |
|
258 | if new is not None: | |
|
259 | # existing directory | |
|
260 | trees[full] = new | |
|
261 | builders[full] = self._repo.TreeBuilder(new) | |
|
262 | else: | |
|
263 | # new directory, use an empty dict to easily | |
|
264 | # generate KeyError as any nested new dirs get | |
|
265 | # created. | |
|
266 | trees[full] = {} | |
|
267 | builders[full] = self._repo.TreeBuilder() | |
|
268 | for f, info in self._pending_changes.items(): | |
|
269 | if b'/' not in f: | |
|
270 | dirname = b'' | |
|
271 | basename = f | |
|
272 | else: | |
|
273 | dirname, basename = f.rsplit(b'/', 1) | |
|
274 | dirname = b'/' + dirname | |
|
275 | if info is None: | |
|
276 | builders[dirname].remove(pycompat.fsdecode(basename)) | |
|
277 | else: | |
|
278 | n, fl = info | |
|
279 | mode = { | |
|
280 | b'': pygit2.GIT_FILEMODE_BLOB, | |
|
281 | b'x': pygit2.GIT_FILEMODE_BLOB_EXECUTABLE, | |
|
282 | b'l': pygit2.GIT_FILEMODE_LINK, | |
|
283 | }[fl] | |
|
284 | builders[dirname].insert( | |
|
285 | pycompat.fsdecode(basename), gitutil.togitnode(n), mode | |
|
286 | ) | |
|
287 | # This visits the buffered TreeBuilders in deepest-first | |
|
288 | # order, bubbling up the edits. | |
|
289 | for b in sorted(builders, key=len, reverse=True): | |
|
290 | if b == b'': | |
|
291 | break | |
|
292 | cb = builders[b] | |
|
293 | dn, bn = b.rsplit(b'/', 1) | |
|
294 | builders[dn].insert( | |
|
295 | pycompat.fsdecode(bn), cb.write(), pygit2.GIT_FILEMODE_TREE | |
|
296 | ) | |
|
297 | return builders[b''].write().raw |
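The deepest-first loop at the end of write() is what makes nested trees work: a child tree must be written before its parent can record the child's new id. One level of that bubbling in plain pygit2, against a throwaway bare repository:

    import pygit2

    repo = pygit2.init_repository('/tmp/demo.git', bare=True)
    blob_id = repo.create_blob(b'hello\n')

    # Write the inner tree first...
    inner = repo.TreeBuilder()
    inner.insert('greeting.txt', blob_id, pygit2.GIT_FILEMODE_BLOB)
    inner_id = inner.write()

    # ...then bubble its id up into the root tree.
    root = repo.TreeBuilder()
    root.insert('docs', inner_id, pygit2.GIT_FILEMODE_TREE)
    root_id = root.write()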
@@ -0,0 +1,26 b'' | |||
|
1 | """collection of simple hooks for common tasks (EXPERIMENTAL) | |
|
2 | ||
|
3 | This extension provides a number of simple hooks to handle issues | |
|
4 | commonly found in repositories with many contributors: | |
|
5 | - email notification when changesets move from draft to public phase | |
|
6 | - email notification when changesets are obsoleted | |
|
7 | - enforcement of draft phase for all incoming changesets | |
|
8 | - enforcement of a no-branch-merge policy | |
|
9 | - enforcement of a no-multiple-heads policy | |
|
10 | ||
|
11 | The implementation of the hooks is subject to change, e.g. whether to | |
|
12 | implement them as individual hooks or merge them into the notify | |
|
13 | extension as options. The functionality itself is planned to be supported | |
|
14 | long-term. | |
|
15 | """ | |
|
16 | from __future__ import absolute_import | |
|
17 | from . import ( | |
|
18 | changeset_obsoleted, | |
|
19 | changeset_published, | |
|
20 | ) | |
|
21 | ||
|
22 | # configtable is only picked up from the "top-level" module of the extension, | |
|
23 | # so expand it here to ensure all items are properly loaded | |
|
24 | configtable = {} | |
|
25 | configtable.update(changeset_published.configtable) | |
|
26 | configtable.update(changeset_obsoleted.configtable) |
@@ -0,0 +1,131 b'' | |||
|
1 | # Copyright 2020 Joerg Sonnenberger <joerg@bec.de> | |
|
2 | # | |
|
3 | # This software may be used and distributed according to the terms of the | |
|
4 | # GNU General Public License version 2 or any later version. | |
|
5 | """changeset_obsoleted is a hook to send a mail when an | |
|
6 | existing draft changeset is obsoleted by an obsmarker without a successor. | |
|
7 | ||
|
8 | Correct message threading requires the same messageidseed to be used for both | |
|
9 | the original notification and the new mail. | |
|
10 | ||
|
11 | Usage: | |
|
12 | [notify] | |
|
13 | messageidseed = myseed | |
|
14 | ||
|
15 | [hooks] | |
|
16 | pretxnclose.changeset_obsoleted = \ | |
|
17 | python:hgext.hooklib.changeset_obsoleted.hook | |
|
18 | """ | |
|
19 | ||
|
20 | from __future__ import absolute_import | |
|
21 | ||
|
22 | import email.errors as emailerrors | |
|
23 | import email.utils as emailutils | |
|
24 | ||
|
25 | from mercurial.i18n import _ | |
|
26 | from mercurial import ( | |
|
27 | encoding, | |
|
28 | error, | |
|
29 | logcmdutil, | |
|
30 | mail, | |
|
31 | obsutil, | |
|
32 | pycompat, | |
|
33 | registrar, | |
|
34 | ) | |
|
35 | from mercurial.utils import dateutil | |
|
36 | from .. import notify | |
|
37 | ||
|
38 | configtable = {} | |
|
39 | configitem = registrar.configitem(configtable) | |
|
40 | ||
|
41 | configitem( | |
|
42 | b'notify_obsoleted', b'domain', default=None, | |
|
43 | ) | |
|
44 | configitem( | |
|
45 | b'notify_obsoleted', b'messageidseed', default=None, | |
|
46 | ) | |
|
47 | configitem( | |
|
48 | b'notify_obsoleted', | |
|
49 | b'template', | |
|
50 | default=b'''Subject: changeset abandoned | |
|
51 | ||
|
52 | This changeset has been abandoned. | |
|
53 | ''', | |
|
54 | ) | |
|
55 | ||
|
56 | ||
|
57 | def _report_commit(ui, repo, ctx): | |
|
58 | domain = ui.config(b'notify_obsoleted', b'domain') or ui.config( | |
|
59 | b'notify', b'domain' | |
|
60 | ) | |
|
61 | messageidseed = ui.config( | |
|
62 | b'notify_obsoleted', b'messageidseed' | |
|
63 | ) or ui.config(b'notify', b'messageidseed') | |
|
64 | template = ui.config(b'notify_obsoleted', b'template') | |
|
65 | spec = logcmdutil.templatespec(template, None) | |
|
66 | templater = logcmdutil.changesettemplater(ui, repo, spec) | |
|
67 | ui.pushbuffer() | |
|
68 | n = notify.notifier(ui, repo, b'incoming') | |
|
69 | ||
|
70 | subs = set() | |
|
71 | for sub, spec in n.subs: | |
|
72 | if spec is None: | |
|
73 | subs.add(sub) | |
|
74 | continue | |
|
75 | revs = repo.revs(b'%r and %d:', spec, ctx.rev()) | |
|
76 | if len(revs): | |
|
77 | subs.add(sub) | |
|
78 | continue | |
|
79 | if len(subs) == 0: | |
|
80 | ui.debug( | |
|
81 | b'notify_obsoleted: no subscribers to selected repo and revset\n' | |
|
82 | ) | |
|
83 | return | |
|
84 | ||
|
85 | templater.show( | |
|
86 | ctx, | |
|
87 | changes=ctx.changeset(), | |
|
88 | baseurl=ui.config(b'web', b'baseurl'), | |
|
89 | root=repo.root, | |
|
90 | webroot=n.root, | |
|
91 | ) | |
|
92 | data = ui.popbuffer() | |
|
93 | ||
|
94 | try: | |
|
95 | msg = mail.parsebytes(data) | |
|
96 | except emailerrors.MessageParseError as inst: | |
|
97 | raise error.Abort(inst) | |
|
98 | ||
|
99 | msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed) | |
|
100 | msg['Message-Id'] = notify.messageid( | |
|
101 | ctx, domain, messageidseed + b'-obsoleted' | |
|
102 | ) | |
|
103 | msg['Date'] = encoding.strfromlocal( | |
|
104 | dateutil.datestr(format=b"%a, %d %b %Y %H:%M:%S %1%2") | |
|
105 | ) | |
|
106 | if not msg['From']: | |
|
107 | sender = ui.config(b'email', b'from') or ui.username() | |
|
108 | if b'@' not in sender or b'@localhost' in sender: | |
|
109 | sender = n.fixmail(sender) | |
|
110 | msg['From'] = mail.addressencode(ui, sender, n.charsets, n.test) | |
|
111 | msg['To'] = ', '.join(sorted(subs)) | |
|
112 | ||
|
113 | msgtext = msg.as_bytes() if pycompat.ispy3 else msg.as_string() | |
|
114 | if ui.configbool(b'notify', b'test'): | |
|
115 | ui.write(msgtext) | |
|
116 | if not msgtext.endswith(b'\n'): | |
|
117 | ui.write(b'\n') | |
|
118 | else: | |
|
119 | ui.status(_(b'notify_obsoleted: sending mail for %d\n') % ctx.rev()) | |
|
120 | mail.sendmail( | |
|
121 | ui, emailutils.parseaddr(msg['From'])[1], subs, msgtext, mbox=n.mbox | |
|
122 | ) | |
|
123 | ||
|
124 | ||
|
125 | def hook(ui, repo, hooktype, node=None, **kwargs): | |
|
126 | if hooktype != b"pretxnclose": | |
|
127 | raise error.Abort( | |
|
128 | _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype) | |
|
129 | ) | |
|
130 | for rev in obsutil.getobsoleted(repo, repo.currenttransaction()): | |
|
131 | _report_commit(ui, repo, repo.unfiltered()[rev]) |
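Threading works because both mails derive their ids from one seed: the original notification's Message-Id becomes this mail's In-reply-to, and the new Message-Id differs only by the b'-obsoleted' suffix fed to notify.messageid(). A rough standalone illustration of a deterministic id (the hashing here is illustrative only; notify.messageid's real construction differs):

    import hashlib

    def fake_messageid(node, domain, seed):
        # Stable for a given (node, seed) pair across runs.
        digest = hashlib.sha256(node + b'-' + seed).hexdigest()[:20]
        return '<hg.%s@%s>' % (digest, domain)

    original = fake_messageid(b'abc123', 'example.org', b'myseed')
    followup = fake_messageid(b'abc123', 'example.org', b'myseed' + b'-obsoleted')
    assert original != followup  # distinct but reproducible ids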
@@ -0,0 +1,131 b'' | |||
|
1 | # Copyright 2020 Joerg Sonnenberger <joerg@bec.de> | |
|
2 | # | |
|
3 | # This software may be used and distributed according to the terms of the | |
|
4 | # GNU General Public License version 2 or any later version. | |
|
5 | """changeset_published is a hook to send a mail when an | |
|
6 | existing draft changeset is moved to the public phase. | |
|
7 | ||
|
8 | Correct message threading requires the same messageidseed to be used for both | |
|
9 | the original notification and the new mail. | |
|
10 | ||
|
11 | Usage: | |
|
12 | [notify] | |
|
13 | messageidseed = myseed | |
|
14 | ||
|
15 | [hooks] | |
|
16 | txnclose-phase.changeset_published = \ | |
|
17 | python:hgext.hooklib.changeset_published.hook | |
|
18 | """ | |
|
19 | ||
|
20 | from __future__ import absolute_import | |
|
21 | ||
|
22 | import email.errors as emailerrors | |
|
23 | import email.utils as emailutils | |
|
24 | ||
|
25 | from mercurial.i18n import _ | |
|
26 | from mercurial import ( | |
|
27 | encoding, | |
|
28 | error, | |
|
29 | logcmdutil, | |
|
30 | mail, | |
|
31 | pycompat, | |
|
32 | registrar, | |
|
33 | ) | |
|
34 | from mercurial.utils import dateutil | |
|
35 | from .. import notify | |
|
36 | ||
|
37 | configtable = {} | |
|
38 | configitem = registrar.configitem(configtable) | |
|
39 | ||
|
40 | configitem( | |
|
41 | b'notify_published', b'domain', default=None, | |
|
42 | ) | |
|
43 | configitem( | |
|
44 | b'notify_published', b'messageidseed', default=None, | |
|
45 | ) | |
|
46 | configitem( | |
|
47 | b'notify_published', | |
|
48 | b'template', | |
|
49 | default=b'''Subject: changeset published | |
|
50 | ||
|
51 | This changeset has been published. | |
|
52 | ''', | |
|
53 | ) | |
|
54 | ||
|
55 | ||
|
56 | def _report_commit(ui, repo, ctx): | |
|
57 | domain = ui.config(b'notify_published', b'domain') or ui.config( | |
|
58 | b'notify', b'domain' | |
|
59 | ) | |
|
60 | messageidseed = ui.config( | |
|
61 | b'notify_published', b'messageidseed' | |
|
62 | ) or ui.config(b'notify', b'messageidseed') | |
|
63 | template = ui.config(b'notify_published', b'template') | |
|
64 | spec = logcmdutil.templatespec(template, None) | |
|
65 | templater = logcmdutil.changesettemplater(ui, repo, spec) | |
|
66 | ui.pushbuffer() | |
|
67 | n = notify.notifier(ui, repo, b'incoming') | |
|
68 | ||
|
69 | subs = set() | |
|
70 | for sub, spec in n.subs: | |
|
71 | if spec is None: | |
|
72 | subs.add(sub) | |
|
73 | continue | |
|
74 | revs = repo.revs(b'%r and %d:', spec, ctx.rev()) | |
|
75 | if len(revs): | |
|
76 | subs.add(sub) | |
|
77 | continue | |
|
78 | if len(subs) == 0: | |
|
79 | ui.debug( | |
|
80 | b'notify_published: no subscribers to selected repo and revset\n' | |
|
81 | ) | |
|
82 | return | |
|
83 | ||
|
84 | templater.show( | |
|
85 | ctx, | |
|
86 | changes=ctx.changeset(), | |
|
87 | baseurl=ui.config(b'web', b'baseurl'), | |
|
88 | root=repo.root, | |
|
89 | webroot=n.root, | |
|
90 | ) | |
|
91 | data = ui.popbuffer() | |
|
92 | ||
|
93 | try: | |
|
94 | msg = mail.parsebytes(data) | |
|
95 | except emailerrors.MessageParseError as inst: | |
|
96 | raise error.Abort(inst) | |
|
97 | ||
|
98 | msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed) | |
|
99 | msg['Message-Id'] = notify.messageid( | |
|
100 | ctx, domain, messageidseed + b'-published' | |
|
101 | ) | |
|
102 | msg['Date'] = encoding.strfromlocal( | |
|
103 | dateutil.datestr(format=b"%a, %d %b %Y %H:%M:%S %1%2") | |
|
104 | ) | |
|
105 | if not msg['From']: | |
|
106 | sender = ui.config(b'email', b'from') or ui.username() | |
|
107 | if b'@' not in sender or b'@localhost' in sender: | |
|
108 | sender = n.fixmail(sender) | |
|
109 | msg['From'] = mail.addressencode(ui, sender, n.charsets, n.test) | |
|
110 | msg['To'] = ', '.join(sorted(subs)) | |
|
111 | ||
|
112 | msgtext = msg.as_bytes() if pycompat.ispy3 else msg.as_string() | |
|
113 | if ui.configbool(b'notify', b'test'): | |
|
114 | ui.write(msgtext) | |
|
115 | if not msgtext.endswith(b'\n'): | |
|
116 | ui.write(b'\n') | |
|
117 | else: | |
|
118 | ui.status(_(b'notify_published: sending mail for %d\n') % ctx.rev()) | |
|
119 | mail.sendmail( | |
|
120 | ui, emailutils.parseaddr(msg['From'])[1], subs, msgtext, mbox=n.mbox | |
|
121 | ) | |
|
122 | ||
|
123 | ||
|
124 | def hook(ui, repo, hooktype, node=None, **kwargs): | |
|
125 | if hooktype != b"txnclose-phase": | |
|
126 | raise error.Abort( | |
|
127 | _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype) | |
|
128 | ) | |
|
129 | ctx = repo.unfiltered()[node] | |
|
130 | if kwargs['oldphase'] == b'draft' and kwargs['phase'] == b'public': | |
|
131 | _report_commit(ui, repo, ctx) |
@@ -0,0 +1,45 b'' | |||
|
1 | # Copyright 2020 Joerg Sonnenberger <joerg@bec.de> | |
|
2 | # | |
|
3 | # This software may be used and distributed according to the terms of the | |
|
4 | # GNU General Public License version 2 or any later version. | |
|
5 | ||
|
6 | """enforce_draft_commits is a hook to ensure that all new changesets are | |
|
7 | in the draft phase. This allows enforcing policies for work-in-progress | |
|
8 | changes in overlay repositories, i.e. a shared hidden repository with | |
|
9 | different views for work-in-progress code and public history. | |
|
10 | ||
|
11 | Usage: | |
|
12 | [hooks] | |
|
13 | pretxnclose-phase.enforce_draft_commits = \ | |
|
14 | python:hgext.hooklib.enforce_draft_commits.hook | |
|
15 | """ | |
|
16 | ||
|
17 | from __future__ import absolute_import | |
|
18 | ||
|
19 | from mercurial.i18n import _ | |
|
20 | from mercurial import ( | |
|
21 | error, | |
|
22 | pycompat, | |
|
23 | ) | |
|
24 | ||
|
25 | ||
|
26 | def hook(ui, repo, hooktype, node=None, **kwargs): | |
|
27 | if hooktype != b"pretxnclose-phase": | |
|
28 | raise error.Abort( | |
|
29 | _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype) | |
|
30 | ) | |
|
31 | ctx = repo.unfiltered()[node] | |
|
32 | if kwargs['oldphase']: | |
|
33 | raise error.Abort( | |
|
34 | _(b'Phase change from %r to %r for %s rejected') | |
|
35 | % ( | |
|
36 | pycompat.bytestr(kwargs['oldphase']), | |
|
37 | pycompat.bytestr(kwargs['phase']), | |
|
38 | ctx, | |
|
39 | ) | |
|
40 | ) | |
|
41 | elif kwargs['phase'] != b'draft': | |
|
42 | raise error.Abort( | |
|
43 | _(b'New changeset %s in phase %r rejected') | |
|
44 | % (ctx, pycompat.bytestr(kwargs['phase'])) | |
|
45 | ) |
@@ -0,0 +1,45 b'' | |||
|
1 | # Copyright 2020 Joerg Sonnenberger <joerg@bec.de> | |
|
2 | # | |
|
3 | # This software may be used and distributed according to the terms of the | |
|
4 | # GNU General Public License version 2 or any later version. | |
|
5 | ||
|
6 | """reject_merge_commits is a hook to check new changesets for merge commits. | |
|
7 | Merge commits are allowed only between different branches, i.e. merging | |
|
8 | a feature branch into the main development branch. This can be used to | |
|
9 | enforce policies for linear commit histories. | |
|
10 | ||
|
11 | Usage: | |
|
12 | [hooks] | |
|
13 | pretxnchangegroup.reject_merge_commits = \ | |
|
14 | python:hgext.hooklib.reject_merge_commits.hook | |
|
15 | """ | |
|
16 | ||
|
17 | from __future__ import absolute_import | |
|
18 | ||
|
19 | from mercurial.i18n import _ | |
|
20 | from mercurial import ( | |
|
21 | error, | |
|
22 | pycompat, | |
|
23 | ) | |
|
24 | ||
|
25 | ||
|
26 | def hook(ui, repo, hooktype, node=None, **kwargs): | |
|
27 | if hooktype != b"pretxnchangegroup": | |
|
28 | raise error.Abort( | |
|
29 | _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype) | |
|
30 | ) | |
|
31 | ||
|
32 | ctx = repo.unfiltered()[node] | |
|
33 | for rev in repo.changelog.revs(start=ctx.rev()): | |
|
34 | rev = repo[rev] | |
|
35 | parents = rev.parents() | |
|
36 | if len(parents) < 2: | |
|
37 | continue | |
|
38 | if all(repo[p].branch() == rev.branch() for p in parents): | |
|
39 | raise error.Abort( | |
|
40 | _( | |
|
41 | b'%s rejected as merge on the same branch. ' | |
|
42 | b'Please consider rebasing.' | |
|
43 | ) | |
|
44 | % rev | |
|
45 | ) |
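The rejection rule above boils down to a small predicate: a merge is refused exactly when it has two parents and every parent shares its branch. A toy restatement with branch lookup as a plain dict (hypothetical data, not repo objects):

    def is_same_branch_merge(branch_of, rev, parents):
        return len(parents) >= 2 and all(
            branch_of[p] == branch_of[rev] for p in parents
        )

    branch_of = {'p1': 'default', 'p2': 'default', 'm': 'default'}
    assert is_same_branch_merge(branch_of, 'm', ['p1', 'p2'])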
@@ -0,0 +1,41 b'' | |||
|
1 | # Copyright 2020 Joerg Sonnenberger <joerg@bec.de> | |
|
2 | # | |
|
3 | # This software may be used and distributed according to the terms of the | |
|
4 | # GNU General Public License version 2 or any later version. | |
|
5 | ||
|
6 | """reject_new_heads is a hook to check that branches touched by new changesets | |
|
7 | have at most one open head. It can be used to enforce policies for | |
|
8 | merge-before-push or rebase-before-push. It does not handle pre-existing | |
|
9 | hydras. | |
|
10 | ||
|
11 | Usage: | |
|
12 | [hooks] | |
|
13 | pretxnclose.reject_new_heads = \ | |
|
14 | python:hgext.hooklib.reject_new_heads.hook | |
|
15 | """ | |
|
16 | ||
|
17 | from __future__ import absolute_import | |
|
18 | ||
|
19 | from mercurial.i18n import _ | |
|
20 | from mercurial import ( | |
|
21 | error, | |
|
22 | pycompat, | |
|
23 | ) | |
|
24 | ||
|
25 | ||
|
26 | def hook(ui, repo, hooktype, node=None, **kwargs): | |
|
27 | if hooktype != b"pretxnclose": | |
|
28 | raise error.Abort( | |
|
29 | _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype) | |
|
30 | ) | |
|
31 | ctx = repo.unfiltered()[node] | |
|
32 | branches = set() | |
|
33 | for rev in repo.changelog.revs(start=ctx.rev()): | |
|
34 | rev = repo[rev] | |
|
35 | branches.add(rev.branch()) | |
|
36 | for branch in branches: | |
|
37 | if len(repo.revs("head() and not closed() and branch(%s)", branch)) > 1: | |
|
38 | raise error.Abort( | |
|
39 | _(b'Changes on branch %r resulted in multiple heads') | |
|
40 | % pycompat.bytestr(branch) | |
|
41 | ) |
|
@@ -64,6 +64,7 b' doc:' | |||
|
64 | 64 | $(MAKE) -C doc |
|
65 | 65 | |
|
66 | 66 | cleanbutpackages: |
|
67 | rm -f hg.exe | |
|
67 | 68 |
|
|
68 | 69 | find contrib doc hgext hgext3rd i18n mercurial tests hgdemandimport \ |
|
69 | 70 | \( -name '*.py[cdo]' -o -name '*.so' \) -exec rm -f '{}' ';' |
@@ -9,7 +9,6 b' build/' | |||
|
9 | 9 | | \.mypy_cache/ |
|
10 | 10 | | \.venv/ |
|
11 | 11 | | mercurial/thirdparty/ |
|
12 | | contrib/python-zstandard/ | |
|
13 | 12 | ''' |
|
14 | 13 | skip-string-normalization = true |
|
15 | 14 | quiet = true |
@@ -81,7 +81,7 b' def runperfcommand(reponame, command, *a' | |||
|
81 | 81 | output = ui.popbuffer() |
|
82 | 82 | match = outputre.search(output) |
|
83 | 83 | if not match: |
|
84 | raise ValueError("Invalid output {
|
84 | raise ValueError("Invalid output {}".format(output)) | |
|
85 | 85 | return float(match.group(1)) |
|
86 | 86 | |
|
87 | 87 |
@@ -32,7 +32,7 b' def check_compat_py2(f):' | |||
|
32 | 32 | for node in ast.walk(root): |
|
33 | 33 | if isinstance(node, ast.ImportFrom): |
|
34 | 34 | if node.module == '__future__': |
|
35 | futures |=
|
35 | futures |= {n.name for n in node.names} | |
|
36 | 36 | elif isinstance(node, ast.Print): |
|
37 | 37 | haveprint = True |
|
38 | 38 |
@@ -226,6 +226,16 b' static void execcmdserver(const struct c' | |||
|
226 | 226 | } |
|
227 | 227 | argv[argsize - 1] = NULL; |
|
228 | 228 | |
|
229 | const char *lc_ctype_env = getenv("LC_CTYPE"); | |
|
230 | if (lc_ctype_env == NULL) { | |
|
231 | if (putenv("CHG_CLEAR_LC_CTYPE=") != 0) | |
|
232 | abortmsgerrno("failed to putenv CHG_CLEAR_LC_CTYPE"); | |
|
233 | } else { | |
|
234 | if (setenv("CHGORIG_LC_CTYPE", lc_ctype_env, 1) != 0) { | |
|
235 | abortmsgerrno("failed to setenv CHGORIG_LC_CTYPE"); | |
|
236 | } | |
|
237 | } | |
|
238 | ||
|
229 | 239 | if (putenv("CHGINTERNALMARK=") != 0) |
|
230 | 240 | abortmsgerrno("failed to putenv"); |
|
231 | 241 | if (execvp(hgcmd, (char **)argv) < 0) |
@@ -364,8 +374,7 b' static int runinstructions(struct cmdser' | |||
|
364 | 374 | |
|
365 | 375 | /* |
|
366 | 376 | * Test whether the command is unsupported or not. This is not designed to |
|
367 | * cover all cases. But it's fast, does not depend on the server
|
368 | * not return false positives. | |
|
377 | * cover all cases. But it's fast, does not depend on the server. | |
|
369 | 378 | */ |
|
370 | 379 | static int isunsupported(int argc, const char *argv[]) |
|
371 | 380 | { |
@@ -378,7 +387,12 b' static int isunsupported(int argc, const' | |||
|
378 | 387 | for (i = 0; i < argc; ++i) { |
|
379 | 388 | if (strcmp(argv[i], "--") == 0) |
|
380 | 389 | break; |
|
381 | if (i == 0 && strcmp("serve", argv[i]) == 0) | |
|
390 | /* | |
|
391 | * there can be false positives but no false negatives | |
|
392 | * we cannot assume `serve` will always be the first argument | |
|
393 | * because global options can be passed before the command name | |
|
394 | */ | |
|
395 | if (strcmp("serve", argv[i]) == 0) | |
|
382 | 396 | state |= SERVE; |
|
383 | 397 | else if (strcmp("-d", argv[i]) == 0 || |
|
384 | 398 | strcmp("--daemon", argv[i]) == 0) |
@@ -6,7 +6,7 b' rustfmt:command = rustfmt +nightly' | |||
|
6 | 6 | rustfmt:pattern = set:**.rs |
|
7 | 7 | |
|
8 | 8 | black:command = black --config=black.toml - |
|
9 | black:pattern = set:**.py - mercurial/thirdparty/**
|
9 | black:pattern = set:**.py - mercurial/thirdparty/** | |
|
10 | 10 | |
|
11 | 11 | # Mercurial doesn't have any Go code, but if we did this is how we |
|
12 | 12 | # would configure `hg fix` for Go: |
@@ -42,13 +42,13 b' rust-cargo-test-py3:' | |||
|
42 | 42 | test-py2: |
|
43 | 43 | <<: *runtests |
|
44 | 44 | variables: |
|
45 | RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt" | |
|
45 | RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt" | |
|
46 | 46 | TEST_HGMODULEPOLICY: "c" |
|
47 | 47 | |
|
48 | 48 | test-py3: |
|
49 | 49 | <<: *runtests |
|
50 | 50 | variables: |
|
51 | RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt" | |
|
51 | RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt" | |
|
52 | 52 | PYTHON: python3 |
|
53 | 53 | TEST_HGMODULEPOLICY: "c" |
|
54 | 54 | |
@@ -69,13 +69,13 b' test-py2-rust:' | |||
|
69 | 69 | <<: *runtests |
|
70 | 70 | variables: |
|
71 | 71 | HGWITHRUSTEXT: cpython |
|
72 | RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt" | |
|
72 | RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt" | |
|
73 | 73 | TEST_HGMODULEPOLICY: "rust+c" |
|
74 | 74 | |
|
75 | 75 | test-py3-rust: |
|
76 | 76 | <<: *runtests |
|
77 | 77 | variables: |
|
78 | 78 | HGWITHRUSTEXT: cpython |
|
79 | RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt" | |
|
79 | RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt" | |
|
80 | 80 | PYTHON: python3 |
|
81 | 81 | TEST_HGMODULEPOLICY: "rust+c" |
@@ -392,9 +392,10 b' def imported_modules(source, modulename,' | |||
|
392 | 392 | modnotfound = True |
|
393 | 393 | continue |
|
394 | 394 | yield found[1] |
|
395 | if modnotfound: | |
|
395 | if modnotfound and dottedpath != modulename: | |
|
396 | 396 | # "dottedpath" is a package, but imported because of non-module |
|
397 | 397 | # lookup |
|
398 | # specifically allow "from . import foo" from __init__.py | |
|
398 | 399 | yield dottedpath |
|
399 | 400 | |
|
400 | 401 |
@@ -1536,6 +1536,7 b' def perfindex(ui, repo, **opts):' | |||
|
1536 | 1536 | matters. |
|
1537 | 1537 | |
|
1538 | 1538 | Example of useful set to test: |
|
1539 | ||
|
1539 | 1540 | * tip |
|
1540 | 1541 | * 0 |
|
1541 | 1542 | * -10: |
@@ -2522,7 +2523,7 b' def perfdiffwd(ui, repo, **opts):' | |||
|
2522 | 2523 | } |
|
2523 | 2524 | |
|
2524 | 2525 | for diffopt in ('', 'w', 'b', 'B', 'wB'): |
|
2525 | opts =
|
2526 | opts = {options[c]: b'1' for c in diffopt} | |
|
2526 | 2527 | |
|
2527 | 2528 | def d(): |
|
2528 | 2529 | ui.pushbuffer() |
@@ -3047,7 +3048,7 b' def perfrevlogchunks(ui, repo, file_=Non' | |||
|
3047 | 3048 | |
|
3048 | 3049 | # Verify engines argument. |
|
3049 | 3050 | if engines: |
|
3050 | engines =
|
3051 | engines = {e.strip() for e in engines.split(b',')} | |
|
3051 | 3052 | for engine in engines: |
|
3052 | 3053 | try: |
|
3053 | 3054 | util.compressionengines[engine] |
@@ -52,7 +52,8 b' SOURCES = [' | |||
|
52 | 52 | |
|
53 | 53 | # Headers whose preprocessed output will be fed into cdef(). |
|
54 | 54 | HEADERS = [ |
|
55 | os.path.join(HERE, "zstd", *p) for p in (("zstd.h",), ("dictBuilder", "zdict.h"),) | |
|
55 | os.path.join(HERE, "zstd", *p) | |
|
56 | for p in (("zstd.h",), ("dictBuilder", "zdict.h"),) | |
|
56 | 57 | ] |
|
57 | 58 | |
|
58 | 59 | INCLUDE_DIRS = [ |
@@ -139,7 +140,9 b' def preprocess(path):' | |||
|
139 | 140 | env = dict(os.environ) |
|
140 | 141 | if getattr(compiler, "_paths", None): |
|
141 | 142 | env["PATH"] = compiler._paths |
|
142 | process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE, env=env) | |
|
143 | process = subprocess.Popen( | |
|
144 | args + [input_file], stdout=subprocess.PIPE, env=env | |
|
145 | ) | |
|
143 | 146 | output = process.communicate()[0] |
|
144 | 147 | ret = process.poll() |
|
145 | 148 | if ret: |
@@ -87,7 +87,9 b' with open("c-ext/python-zstandard.h", "r' | |||
|
87 | 87 | break |
|
88 | 88 | |
|
89 | 89 | if not version: |
|
90 | raise Exception("could not resolve package version; " "this should never happen") | |
|
90 | raise Exception( | |
|
91 | "could not resolve package version; " "this should never happen" | |
|
92 | ) | |
|
91 | 93 | |
|
92 | 94 | setup( |
|
93 | 95 | name="zstandard", |
@@ -138,12 +138,16 b' def get_c_extension(' | |||
|
138 | 138 | if not system_zstd: |
|
139 | 139 | sources.update([os.path.join(actual_root, p) for p in zstd_sources]) |
|
140 | 140 | if support_legacy: |
|
141 | sources.update([os.path.join(actual_root, p) for p in zstd_sources_legacy]) | |
|
141 | sources.update( | |
|
142 | [os.path.join(actual_root, p) for p in zstd_sources_legacy] | |
|
143 | ) | |
|
142 | 144 | sources = list(sources) |
|
143 | 145 | |
|
144 | 146 | include_dirs = set([os.path.join(actual_root, d) for d in ext_includes]) |
|
145 | 147 | if not system_zstd: |
|
146 | include_dirs.update([os.path.join(actual_root, d) for d in zstd_includes]) | |
|
148 | include_dirs.update( | |
|
149 | [os.path.join(actual_root, d) for d in zstd_includes] | |
|
150 | ) | |
|
147 | 151 | if support_legacy: |
|
148 | 152 | include_dirs.update( |
|
149 | 153 | [os.path.join(actual_root, d) for d in zstd_includes_legacy] |
@@ -50,7 +50,9 b' def make_cffi(cls):' | |||
|
50 | 50 | os.environ.update(old_env) |
|
51 | 51 | |
|
52 | 52 | if mod.backend != "cffi": |
|
53 | raise Exception("got the zstandard %s backend instead of cffi" % mod.backend) | |
|
53 | raise Exception( | |
|
54 | "got the zstandard %s backend instead of cffi" % mod.backend | |
|
55 | ) | |
|
54 | 56 | |
|
55 | 57 | # If CFFI version is available, dynamically construct test methods |
|
56 | 58 | # that use it. |
@@ -84,7 +86,9 b' def make_cffi(cls):' | |||
|
84 | 86 | fn.__func__.func_defaults, |
|
85 | 87 | fn.__func__.func_closure, |
|
86 | 88 | ) |
|
87 | new_method = types.UnboundMethodType(
|
89 | new_method = types.UnboundMethodType( | |
|
90 | new_fn, fn.im_self, fn.im_class | |
|
91 | ) | |
|
88 | 92 | |
|
89 | 93 | setattr(cls, name, new_method) |
|
90 | 94 | |
@@ -194,4 +198,6 b' if hypothesis:' | |||
|
194 | 198 | expensive_settings = hypothesis.settings(deadline=None, max_examples=10000) |
|
195 | 199 | hypothesis.settings.register_profile("expensive", expensive_settings) |
|
196 | 200 | |
|
197 | hypothesis.settings.load_profile(
|
201 | hypothesis.settings.load_profile( | |
|
202 | os.environ.get("HYPOTHESIS_PROFILE", "default") | |
|
203 | ) |
@@ -67,7 +67,8 b' class TestBufferWithSegments(TestCase):' | |||
|
67 | 67 | self.skipTest("BufferWithSegments not available") |
|
68 | 68 | |
|
69 | 69 | b = zstd.BufferWithSegments( |
|
70 | b"foofooxfooxy", b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)]) | |
|
70 | b"foofooxfooxy", | |
|
71 | b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)]), | |
|
71 | 72 | ) |
|
72 | 73 | self.assertEqual(len(b), 3) |
|
73 | 74 | self.assertEqual(b.size, 12) |
@@ -83,17 +84,23 b' class TestBufferWithSegmentsCollection(T' | |||
|
83 | 84 | if not hasattr(zstd, "BufferWithSegmentsCollection"): |
|
84 | 85 | self.skipTest("BufferWithSegmentsCollection not available") |
|
85 | 86 | |
|
86 | with self.assertRaisesRegex(
|
87 | with self.assertRaisesRegex( | |
|
88 | ValueError, "must pass at least 1 argument" | |
|
89 | ): | |
|
87 | 90 | zstd.BufferWithSegmentsCollection() |
|
88 | 91 | |
|
89 | 92 | def test_argument_validation(self): |
|
90 | 93 | if not hasattr(zstd, "BufferWithSegmentsCollection"): |
|
91 | 94 | self.skipTest("BufferWithSegmentsCollection not available") |
|
92 | 95 | |
|
93 | with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"): | |
|
96 | with self.assertRaisesRegex( | |
|
97 | TypeError, "arguments must be BufferWithSegments" | |
|
98 | ): | |
|
94 | 99 | zstd.BufferWithSegmentsCollection(None) |
|
95 | 100 | |
|
96 | with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"): | |
|
101 | with self.assertRaisesRegex( | |
|
102 | TypeError, "arguments must be BufferWithSegments" | |
|
103 | ): | |
|
97 | 104 | zstd.BufferWithSegmentsCollection( |
|
98 | 105 | zstd.BufferWithSegments(b"foo", ss.pack(0, 3)), None |
|
99 | 106 | ) |
@@ -24,7 +24,9 b' else:' | |||
|
24 | 24 | |
|
25 | 25 | |
|
26 | 26 | def multithreaded_chunk_size(level, source_size=0): |
|
27 | params = zstd.ZstdCompressionParameters.from_level(
|
27 | params = zstd.ZstdCompressionParameters.from_level( | |
|
28 | level, source_size=source_size | |
|
29 | ) | |
|
28 | 30 | |
|
29 | 31 | return 1 << (params.window_log + 2) |
|
30 | 32 | |
@@ -86,7 +88,9 b' class TestCompressor_compress(TestCase):' | |||
|
86 | 88 | |
|
87 | 89 | # This matches the test for read_to_iter() below. |
|
88 | 90 | cctx = zstd.ZstdCompressor(level=1, write_content_size=False) |
|
89 | result = cctx.compress(b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o") | |
|
91 | result = cctx.compress( | |
|
92 | b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o" | |
|
93 | ) | |
|
90 | 94 | self.assertEqual( |
|
91 | 95 | result, |
|
92 | 96 | b"\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00" |
@@ -99,7 +103,9 b' class TestCompressor_compress(TestCase):' | |||
|
99 | 103 | result = cctx.compress(b"foo" * 256) |
|
100 | 104 | |
|
101 | 105 | def test_no_magic(self): |
|
102 | params = zstd.ZstdCompressionParameters.from_level(
|
106 | params = zstd.ZstdCompressionParameters.from_level( | |
|
107 | 1, format=zstd.FORMAT_ZSTD1 | |
|
108 | ) | |
|
103 | 109 | cctx = zstd.ZstdCompressor(compression_params=params) |
|
104 | 110 | magic = cctx.compress(b"foobar") |
|
105 | 111 | |
@@ -223,7 +229,8 b' class TestCompressor_compress(TestCase):' | |||
|
223 | 229 | |
|
224 | 230 | self.assertEqual( |
|
225 | 231 | result, |
|
226 | b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00"
|
232 | b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00" | |
|
233 | b"\x66\x6f\x6f", | |
|
227 | 234 | ) |
|
228 | 235 | |
|
229 | 236 | def test_multithreaded_compression_params(self): |
@@ -234,7 +241,9 b' class TestCompressor_compress(TestCase):' | |||
|
234 | 241 | params = zstd.get_frame_parameters(result) |
|
235 | 242 | self.assertEqual(params.content_size, 3) |
|
236 | 243 | |
|
237 | self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f") | |
|
244 | self.assertEqual( | |
|
245 | result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f" | |
|
246 | ) | |
|
238 | 247 | |
|
239 | 248 | |
|
240 | 249 | @make_cffi |
@@ -347,7 +356,9 b' class TestCompressor_compressobj(TestCas' | |||
|
347 | 356 | ) |
|
348 | 357 | self.assertEqual(cobj.compress(b"bar"), b"") |
|
349 | 358 | # 3 byte header plus content. |
|
350 | self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar") | |
|
359 | self.assertEqual( | |
|
360 | cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar" | |
|
361 | ) | |
|
351 | 362 | self.assertEqual(cobj.flush(), b"\x01\x00\x00") |
|
352 | 363 | |
|
353 | 364 | def test_flush_empty_block(self): |
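The "3 byte header plus content" comment refers to the zstd block format: a 24-bit little-endian block header packs a last-block flag (bit 0), a block type (bits 1-2, where 0 means a raw block), and the block size (bits 3-23). A sketch decoding the two headers asserted above:

    def parse_block_header(header3):
        value = int.from_bytes(header3, "little")  # 24-bit LE integer
        last_block = value & 1
        block_type = (value >> 1) & 3  # 0 = raw, 1 = RLE, 2 = compressed
        block_size = value >> 3
        return last_block, block_type, block_size

    # b"\x18\x00\x00" + b"bar": a raw 3-byte block that is not the last block.
    assert parse_block_header(b"\x18\x00\x00") == (0, 0, 3)
    # b"\x01\x00\x00": an empty raw block marking the end of the frame.
    assert parse_block_header(b"\x01\x00\x00") == (1, 0, 0)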
@@ -445,7 +456,9 b' class TestCompressor_copy_stream(TestCas' | |||
|
445 | 456 | self.assertEqual(int(r), 0) |
|
446 | 457 | self.assertEqual(w, 9) |
|
447 | 458 | |
|
448 | self.assertEqual(dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00") | |
|
459 | self.assertEqual( | |
|
460 | dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00" | |
|
461 | ) | |
|
449 | 462 | |
|
450 | 463 | def test_large_data(self): |
|
451 | 464 | source = io.BytesIO() |
@@ -478,7 +491,9 b' class TestCompressor_copy_stream(TestCas' | |||
|
478 | 491 | cctx = zstd.ZstdCompressor(level=1, write_checksum=True) |
|
479 | 492 | cctx.copy_stream(source, with_checksum) |
|
480 | 493 | |
|
481 | self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4) | |
|
494 | self.assertEqual( | |
|
495 | len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4 | |
|
496 | ) | |
|
482 | 497 | |
|
483 | 498 | no_params = zstd.get_frame_parameters(no_checksum.getvalue()) |
|
484 | 499 | with_params = zstd.get_frame_parameters(with_checksum.getvalue()) |
@@ -585,7 +600,9 b' class TestCompressor_stream_reader(TestC' | |||
|
585 | 600 | cctx = zstd.ZstdCompressor() |
|
586 | 601 | |
|
587 | 602 | with cctx.stream_reader(b"foo") as reader: |
|
588 | with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"): |
|
603 | with self.assertRaisesRegex( | |
|
604 | ValueError, "cannot __enter__ multiple times" | |
|
605 | ): | |
|
589 | 606 | with reader as reader2: |
|
590 | 607 | pass |
|
591 | 608 | |
@@ -744,7 +761,9 b' class TestCompressor_stream_reader(TestC' | |||
|
744 | 761 | source = io.BytesIO(b"foobar") |
|
745 | 762 | |
|
746 | 763 | with cctx.stream_reader(source, size=2) as reader: |
|
747 | with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"): |
|
764 | with self.assertRaisesRegex( | |
|
765 | zstd.ZstdError, "Src size is incorrect" | |
|
766 | ): | |
|
748 | 767 | reader.read(10) |
|
749 | 768 | |
|
750 | 769 | # Try another compression operation. |
@@ -1126,7 +1145,9 b' class TestCompressor_stream_writer(TestC' | |||
|
1126 | 1145 | self.assertFalse(no_params.has_checksum) |
|
1127 | 1146 | self.assertTrue(with_params.has_checksum) |
|
1128 | 1147 | |
|
1129 | self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4) | |
|
1148 | self.assertEqual( | |
|
1149 | len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4 | |
|
1150 | ) | |
|
1130 | 1151 | |
|
1131 | 1152 | def test_write_content_size(self): |
|
1132 | 1153 | no_size = NonClosingBytesIO() |
@@ -1145,7 +1166,9 b' class TestCompressor_stream_writer(TestC' | |||
|
1145 | 1166 | |
|
1146 | 1167 | # Declaring size will write the header. |
|
1147 | 1168 | with_size = NonClosingBytesIO() |
|
1148 | with cctx.stream_writer(with_size, size=len(b"foobar" * 256)) as compressor: | |
|
1169 | with cctx.stream_writer( | |
|
1170 | with_size, size=len(b"foobar" * 256) | |
|
1171 | ) as compressor: | |
|
1149 | 1172 | self.assertEqual(compressor.write(b"foobar" * 256), 0) |
|
1150 | 1173 | |
|
1151 | 1174 | no_params = zstd.get_frame_parameters(no_size.getvalue()) |
@@ -1191,7 +1214,9 b' class TestCompressor_stream_writer(TestC' | |||
|
1191 | 1214 | self.assertFalse(no_params.has_checksum) |
|
1192 | 1215 | self.assertFalse(with_params.has_checksum) |
|
1193 | 1216 | |
|
1194 | self.assertEqual(len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4) | |
|
1217 | self.assertEqual( | |
|
1218 | len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4 | |
|
1219 | ) | |
|
1195 | 1220 | |
|
1196 | 1221 | def test_memory_size(self): |
|
1197 | 1222 | cctx = zstd.ZstdCompressor(level=3) |
@@ -1337,7 +1362,9 b' class TestCompressor_read_to_iter(TestCa' | |||
|
1337 | 1362 | for chunk in cctx.read_to_iter(b"foobar"): |
|
1338 | 1363 | pass |
|
1339 | 1364 | |
|
1340 | with self.assertRaisesRegex(ValueError, "must pass an object with a read"): |
|
1365 | with self.assertRaisesRegex( | |
|
1366 | ValueError, "must pass an object with a read" | |
|
1367 | ): | |
|
1341 | 1368 | for chunk in cctx.read_to_iter(True): |
|
1342 | 1369 | pass |
|
1343 | 1370 | |
@@ -1513,7 +1540,9 b' class TestCompressor_chunker(TestCase):' | |||
|
1513 | 1540 | |
|
1514 | 1541 | dctx = zstd.ZstdDecompressor() |
|
1515 | 1542 | |
|
1516 | self.assertEqual(dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24)) | |
|
1543 | self.assertEqual( | |
|
1544 | dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24) | |
|
1545 | ) | |
|
1517 | 1546 | |
|
1518 | 1547 | def test_small_chunk_size(self): |
|
1519 | 1548 | cctx = zstd.ZstdCompressor() |
@@ -1533,7 +1562,8 b' class TestCompressor_chunker(TestCase):' | |||
|
1533 | 1562 | |
|
1534 | 1563 | dctx = zstd.ZstdDecompressor() |
|
1535 | 1564 | self.assertEqual( |
|
1536 | dctx.decompress(b"".join(chunks), max_output_size=10000), b"foo" * 1024, |
|
1565 | dctx.decompress(b"".join(chunks), max_output_size=10000), | |
|
1566 | b"foo" * 1024, | |
|
1537 | 1567 | ) |
|
1538 | 1568 | |
|
1539 | 1569 | def test_input_types(self): |
@@ -1602,7 +1632,8 b' class TestCompressor_chunker(TestCase):' | |||
|
1602 | 1632 | list(chunker.finish()) |
|
1603 | 1633 | |
|
1604 | 1634 | with self.assertRaisesRegex( |
|
1605 | zstd.ZstdError, r"cannot call compress\(\) after compression finished" | |
|
1635 | zstd.ZstdError, | |
|
1636 | r"cannot call compress\(\) after compression finished", | |
|
1606 | 1637 | ): |
|
1607 | 1638 | list(chunker.compress(b"foo")) |
|
1608 | 1639 | |
@@ -1644,7 +1675,9 b' class TestCompressor_multi_compress_to_b' | |||
|
1644 | 1675 | with self.assertRaises(TypeError): |
|
1645 | 1676 | cctx.multi_compress_to_buffer((1, 2)) |
|
1646 | 1677 | |
|
1647 | with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"): |
|
1678 | with self.assertRaisesRegex( | |
|
1679 | TypeError, "item 0 not a bytes like object" | |
|
1680 | ): | |
|
1648 | 1681 | cctx.multi_compress_to_buffer([u"foo"]) |
|
1649 | 1682 | |
|
1650 | 1683 | def test_empty_input(self): |
@@ -28,9 +28,13 b' class TestCompressor_stream_reader_fuzzi' | |||
|
28 | 28 | original=strategies.sampled_from(random_input_data()), |
|
29 | 29 | level=strategies.integers(min_value=1, max_value=5), |
|
30 | 30 | source_read_size=strategies.integers(1, 16384), |
|
31 | read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE), |
|
31 | read_size=strategies.integers( | |
|
32 | -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
33 | ), | |
|
32 | 34 | ) |
|
33 | def test_stream_source_read(self, original, level, source_read_size, read_size): | |
|
35 | def test_stream_source_read( | |
|
36 | self, original, level, source_read_size, read_size | |
|
37 | ): | |
|
34 | 38 | if read_size == 0: |
|
35 | 39 | read_size = -1 |
|
36 | 40 | |
@@ -58,9 +62,13 b' class TestCompressor_stream_reader_fuzzi' | |||
|
58 | 62 | original=strategies.sampled_from(random_input_data()), |
|
59 | 63 | level=strategies.integers(min_value=1, max_value=5), |
|
60 | 64 | source_read_size=strategies.integers(1, 16384), |
|
61 | read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE), |
|
65 | read_size=strategies.integers( | |
|
66 | -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
67 | ), | |
|
62 | 68 | ) |
|
63 | def test_buffer_source_read(self, original, level, source_read_size, read_size): | |
|
69 | def test_buffer_source_read( | |
|
70 | self, original, level, source_read_size, read_size | |
|
71 | ): | |
|
64 | 72 | if read_size == 0: |
|
65 | 73 | read_size = -1 |
|
66 | 74 | |
@@ -155,9 +163,13 b' class TestCompressor_stream_reader_fuzzi' | |||
|
155 | 163 | original=strategies.sampled_from(random_input_data()), |
|
156 | 164 | level=strategies.integers(min_value=1, max_value=5), |
|
157 | 165 | source_read_size=strategies.integers(1, 16384), |
|
158 | read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE), |
|
166 | read_size=strategies.integers( | |
|
167 | 1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
168 | ), | |
|
159 | 169 | ) |
|
160 | def test_stream_source_readinto(self, original, level, source_read_size, read_size): | |
|
170 | def test_stream_source_readinto( | |
|
171 | self, original, level, source_read_size, read_size | |
|
172 | ): | |
|
161 | 173 | refctx = zstd.ZstdCompressor(level=level) |
|
162 | 174 | ref_frame = refctx.compress(original) |
|
163 | 175 | |
@@ -184,9 +196,13 b' class TestCompressor_stream_reader_fuzzi' | |||
|
184 | 196 | original=strategies.sampled_from(random_input_data()), |
|
185 | 197 | level=strategies.integers(min_value=1, max_value=5), |
|
186 | 198 | source_read_size=strategies.integers(1, 16384), |
|
187 | read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE), |
|
199 | read_size=strategies.integers( | |
|
200 | 1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
201 | ), | |
|
188 | 202 | ) |
|
189 | def test_buffer_source_readinto(self, original, level, source_read_size, read_size): | |
|
203 | def test_buffer_source_readinto( | |
|
204 | self, original, level, source_read_size, read_size | |
|
205 | ): | |
|
190 | 206 | |
|
191 | 207 | refctx = zstd.ZstdCompressor(level=level) |
|
192 | 208 | ref_frame = refctx.compress(original) |
@@ -285,9 +301,13 b' class TestCompressor_stream_reader_fuzzi' | |||
|
285 | 301 | original=strategies.sampled_from(random_input_data()), |
|
286 | 302 | level=strategies.integers(min_value=1, max_value=5), |
|
287 | 303 | source_read_size=strategies.integers(1, 16384), |
|
288 | read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE), |
|
304 | read_size=strategies.integers( | |
|
305 | -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
306 | ), | |
|
289 | 307 | ) |
|
290 | def test_stream_source_read1(self, original, level, source_read_size, read_size): | |
|
308 | def test_stream_source_read1( | |
|
309 | self, original, level, source_read_size, read_size | |
|
310 | ): | |
|
291 | 311 | if read_size == 0: |
|
292 | 312 | read_size = -1 |
|
293 | 313 | |
@@ -315,9 +335,13 b' class TestCompressor_stream_reader_fuzzi' | |||
|
315 | 335 | original=strategies.sampled_from(random_input_data()), |
|
316 | 336 | level=strategies.integers(min_value=1, max_value=5), |
|
317 | 337 | source_read_size=strategies.integers(1, 16384), |
|
318 | read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE), |
|
338 | read_size=strategies.integers( | |
|
339 | -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
340 | ), | |
|
319 | 341 | ) |
|
320 | def test_buffer_source_read1(self, original, level, source_read_size, read_size): | |
|
342 | def test_buffer_source_read1( | |
|
343 | self, original, level, source_read_size, read_size | |
|
344 | ): | |
|
321 | 345 | if read_size == 0: |
|
322 | 346 | read_size = -1 |
|
323 | 347 | |
@@ -412,7 +436,9 b' class TestCompressor_stream_reader_fuzzi' | |||
|
412 | 436 | original=strategies.sampled_from(random_input_data()), |
|
413 | 437 | level=strategies.integers(min_value=1, max_value=5), |
|
414 | 438 | source_read_size=strategies.integers(1, 16384), |
|
415 | read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE), |
|
439 | read_size=strategies.integers( | |
|
440 | 1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
441 | ), | |
|
416 | 442 | ) |
|
417 | 443 | def test_stream_source_readinto1( |
|
418 | 444 | self, original, level, source_read_size, read_size |
@@ -446,7 +472,9 b' class TestCompressor_stream_reader_fuzzi' | |||
|
446 | 472 | original=strategies.sampled_from(random_input_data()), |
|
447 | 473 | level=strategies.integers(min_value=1, max_value=5), |
|
448 | 474 | source_read_size=strategies.integers(1, 16384), |
|
449 | read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE), |
|
475 | read_size=strategies.integers( | |
|
476 | 1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
477 | ), | |
|
450 | 478 | ) |
|
451 | 479 | def test_buffer_source_readinto1( |
|
452 | 480 | self, original, level, source_read_size, read_size |
@@ -576,7 +604,9 b' class TestCompressor_copy_stream_fuzzing' | |||
|
576 | 604 | read_size=strategies.integers(min_value=1, max_value=1048576), |
|
577 | 605 | write_size=strategies.integers(min_value=1, max_value=1048576), |
|
578 | 606 | ) |
|
579 | def test_read_write_size_variance(self, original, level, read_size, write_size): |
|
607 | def test_read_write_size_variance( | |
|
608 | self, original, level, read_size, write_size | |
|
609 | ): | |
|
580 | 610 | refctx = zstd.ZstdCompressor(level=level) |
|
581 | 611 | ref_frame = refctx.compress(original) |
|
582 | 612 | |
@@ -585,7 +615,11 b' class TestCompressor_copy_stream_fuzzing' | |||
|
585 | 615 | dest = io.BytesIO() |
|
586 | 616 | |
|
587 | 617 | cctx.copy_stream( |
|
588 | source, dest, size=len(original), read_size=read_size, write_size=write_size | |
|
618 | source, | |
|
619 | dest, | |
|
620 | size=len(original), | |
|
621 | read_size=read_size, | |
|
622 | write_size=write_size, | |
|
589 | 623 | ) |
|
590 | 624 | |
|
591 | 625 | self.assertEqual(dest.getvalue(), ref_frame) |
@@ -675,7 +709,9 b' class TestCompressor_compressobj_fuzzing' | |||
|
675 | 709 | decompressed_chunks.append(dobj.decompress(chunk)) |
|
676 | 710 | |
|
677 | 711 | self.assertEqual( |
|
678 | dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)), | |
|
712 | dctx.decompress( | |
|
713 | b"".join(compressed_chunks), max_output_size=len(original) | |
|
714 | ), | |
|
679 | 715 | original, |
|
680 | 716 | ) |
|
681 | 717 | self.assertEqual(b"".join(decompressed_chunks), original) |
@@ -690,7 +726,9 b' class TestCompressor_read_to_iter_fuzzin' | |||
|
690 | 726 | read_size=strategies.integers(min_value=1, max_value=4096), |
|
691 | 727 | write_size=strategies.integers(min_value=1, max_value=4096), |
|
692 | 728 | ) |
|
693 | def test_read_write_size_variance(self, original, level, read_size, write_size): |
|
729 | def test_read_write_size_variance( | |
|
730 | self, original, level, read_size, write_size | |
|
731 | ): | |
|
694 | 732 | refcctx = zstd.ZstdCompressor(level=level) |
|
695 | 733 | ref_frame = refcctx.compress(original) |
|
696 | 734 | |
@@ -699,7 +737,10 b' class TestCompressor_read_to_iter_fuzzin' | |||
|
699 | 737 | cctx = zstd.ZstdCompressor(level=level) |
|
700 | 738 | chunks = list( |
|
701 | 739 | cctx.read_to_iter( |
|
702 | source, size=len(original), read_size=read_size, write_size=write_size | |
|
740 | source, | |
|
741 | size=len(original), | |
|
742 | read_size=read_size, | |
|
743 | write_size=write_size, | |
|
703 | 744 | ) |
|
704 | 745 | ) |
|
705 | 746 | |
@@ -710,7 +751,9 b' class TestCompressor_read_to_iter_fuzzin' | |||
|
710 | 751 | class TestCompressor_multi_compress_to_buffer_fuzzing(TestCase): |
|
711 | 752 | @hypothesis.given( |
|
712 | 753 | original=strategies.lists( |
|
713 | strategies.sampled_from(random_input_data()), min_size=1, max_size=1024, |
|
754 | strategies.sampled_from(random_input_data()), | |
|
755 | min_size=1, | |
|
756 | max_size=1024, | |
|
714 | 757 | ), |
|
715 | 758 | threads=strategies.integers(min_value=1, max_value=8), |
|
716 | 759 | use_dict=strategies.booleans(), |
@@ -776,7 +819,8 b' class TestCompressor_chunker_fuzzing(Tes' | |||
|
776 | 819 | dctx = zstd.ZstdDecompressor() |
|
777 | 820 | |
|
778 | 821 | self.assertEqual( |
|
779 | dctx.decompress(b"".join(chunks), max_output_size=len(original)), original, |
|
822 | dctx.decompress(b"".join(chunks), max_output_size=len(original)), | |
|
823 | original, | |
|
780 | 824 | ) |
|
781 | 825 | |
|
782 | 826 | self.assertTrue(all(len(chunk) == chunk_size for chunk in chunks[:-1])) |
@@ -794,7 +838,9 b' class TestCompressor_chunker_fuzzing(Tes' | |||
|
794 | 838 | input_sizes=strategies.data(), |
|
795 | 839 | flushes=strategies.data(), |
|
796 | 840 | ) |
|
797 | def test_flush_block(self, original, level, chunk_size, input_sizes, flushes): | |
|
841 | def test_flush_block( | |
|
842 | self, original, level, chunk_size, input_sizes, flushes | |
|
843 | ): | |
|
798 | 844 | cctx = zstd.ZstdCompressor(level=level) |
|
799 | 845 | chunker = cctx.chunker(chunk_size=chunk_size) |
|
800 | 846 | |
@@ -830,7 +876,9 b' class TestCompressor_chunker_fuzzing(Tes' | |||
|
830 | 876 | decompressed_chunks.append(dobj.decompress(b"".join(chunks))) |
|
831 | 877 | |
|
832 | 878 | self.assertEqual( |
|
833 | dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)), | |
|
879 | dctx.decompress( | |
|
880 | b"".join(compressed_chunks), max_output_size=len(original) | |
|
881 | ), | |
|
834 | 882 | original, |
|
835 | 883 | ) |
|
836 | 884 | self.assertEqual(b"".join(decompressed_chunks), original) |
@@ -65,7 +65,9 b' class TestCompressionParameters(TestCase' | |||
|
65 | 65 | p = zstd.ZstdCompressionParameters(threads=4) |
|
66 | 66 | self.assertEqual(p.threads, 4) |
|
67 | 67 | |
|
68 | p = zstd.ZstdCompressionParameters(threads=2, job_size=1048576, overlap_log=6) |
|
68 | p = zstd.ZstdCompressionParameters( | |
|
69 | threads=2, job_size=1048576, overlap_log=6 | |
|
70 | ) | |
|
69 | 71 | self.assertEqual(p.threads, 2) |
|
70 | 72 | self.assertEqual(p.job_size, 1048576) |
|
71 | 73 | self.assertEqual(p.overlap_log, 6) |
@@ -128,7 +130,9 b' class TestCompressionParameters(TestCase' | |||
|
128 | 130 | with self.assertRaisesRegex( |
|
129 | 131 | ValueError, "cannot specify both ldm_hash_rate_log" |
|
130 | 132 | ): |
|
131 | zstd.ZstdCompressionParameters(ldm_hash_rate_log=8, ldm_hash_every_log=4) |
|
133 | zstd.ZstdCompressionParameters( | |
|
134 | ldm_hash_rate_log=8, ldm_hash_every_log=4 | |
|
135 | ) | |
|
132 | 136 | |
|
133 | 137 | p = zstd.ZstdCompressionParameters(ldm_hash_rate_log=8) |
|
134 | 138 | self.assertEqual(p.ldm_hash_every_log, 8) |
@@ -137,7 +141,9 b' class TestCompressionParameters(TestCase' | |||
|
137 | 141 | self.assertEqual(p.ldm_hash_every_log, 16) |
|
138 | 142 | |
|
139 | 143 | def test_overlap_log(self): |
|
140 | with self.assertRaisesRegex(ValueError, "cannot specify both overlap_log"): |
|
144 | with self.assertRaisesRegex( | |
|
145 | ValueError, "cannot specify both overlap_log" | |
|
146 | ): | |
|
141 | 147 | zstd.ZstdCompressionParameters(overlap_log=1, overlap_size_log=9) |
|
142 | 148 | |
|
143 | 149 | p = zstd.ZstdCompressionParameters(overlap_log=2) |
@@ -169,10 +175,14 b' class TestFrameParameters(TestCase):' | |||
|
169 | 175 | zstd.get_frame_parameters(u"foobarbaz") |
|
170 | 176 | |
|
171 | 177 | def test_invalid_input_sizes(self): |
|
172 | with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"): |
|
178 | with self.assertRaisesRegex( | |
|
179 | zstd.ZstdError, "not enough data for frame" | |
|
180 | ): | |
|
173 | 181 | zstd.get_frame_parameters(b"") |
|
174 | 182 | |
|
175 | with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"): |
|
183 | with self.assertRaisesRegex( | |
|
184 | zstd.ZstdError, "not enough data for frame" | |
|
185 | ): | |
|
176 | 186 | zstd.get_frame_parameters(zstd.FRAME_HEADER) |
|
177 | 187 | |
|
178 | 188 | def test_invalid_frame(self): |
@@ -201,7 +211,9 b' class TestFrameParameters(TestCase):' | |||
|
201 | 211 | self.assertTrue(params.has_checksum) |
|
202 | 212 | |
|
203 | 213 | # Upper 2 bits indicate content size. |
|
204 | params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x40\x00\xff\x00") |
|
214 | params = zstd.get_frame_parameters( | |
|
215 | zstd.FRAME_HEADER + b"\x40\x00\xff\x00" | |
|
216 | ) | |
|
205 | 217 | self.assertEqual(params.content_size, 511) |
|
206 | 218 | self.assertEqual(params.window_size, 1024) |
|
207 | 219 | self.assertEqual(params.dict_id, 0) |
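The "Upper 2 bits" comment maps onto the frame header descriptor byte. For the b"\x40\x00\xff\x00" payload above: 0x40 sets the two-bit content-size flag to 1 (a 2-byte field), the window descriptor 0x00 selects the minimum 1 KiB window, and with that flag the 2-byte little-endian field decodes with 256 added, giving 255 + 256 = 511. A sketch of that decoding:

    header = b"\x40\x00\xff\x00"  # the bytes following the 4-byte magic

    fcs_flag = header[0] >> 6          # 0b01 -> 2-byte content size field
    assert fcs_flag == 1

    exponent = header[1] >> 3          # window descriptor; mantissa bits are 0
    assert 1 << (10 + exponent) == 1024

    # With a 2-byte field, the decoded size is the LE value plus 256.
    assert int.from_bytes(header[2:4], "little") + 256 == 511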
@@ -215,7 +227,9 b' class TestFrameParameters(TestCase):' | |||
|
215 | 227 | self.assertFalse(params.has_checksum) |
|
216 | 228 | |
|
217 | 229 | # Set multiple things. |
|
218 | params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00") |
|
230 | params = zstd.get_frame_parameters( | |
|
231 | zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00" | |
|
232 | ) | |
|
219 | 233 | self.assertEqual(params.content_size, 272) |
|
220 | 234 | self.assertEqual(params.window_size, 262144) |
|
221 | 235 | self.assertEqual(params.dict_id, 15) |
@@ -23,7 +23,9 b' s_windowlog = strategies.integers(' | |||
|
23 | 23 | s_chainlog = strategies.integers( |
|
24 | 24 | min_value=zstd.CHAINLOG_MIN, max_value=zstd.CHAINLOG_MAX |
|
25 | 25 | ) |
|
26 | s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX) | |
|
26 | s_hashlog = strategies.integers( | |
|
27 | min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX | |
|
28 | ) | |
|
27 | 29 | s_searchlog = strategies.integers( |
|
28 | 30 | min_value=zstd.SEARCHLOG_MIN, max_value=zstd.SEARCHLOG_MAX |
|
29 | 31 | ) |
@@ -61,7 +63,14 b' class TestCompressionParametersHypothesi' | |||
|
61 | 63 | s_strategy, |
|
62 | 64 | ) |
|
63 | 65 | def test_valid_init( |
|
64 | self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy | |
|
66 | self, | |
|
67 | windowlog, | |
|
68 | chainlog, | |
|
69 | hashlog, | |
|
70 | searchlog, | |
|
71 | minmatch, | |
|
72 | targetlength, | |
|
73 | strategy, | |
|
65 | 74 | ): |
|
66 | 75 | zstd.ZstdCompressionParameters( |
|
67 | 76 | window_log=windowlog, |
@@ -83,7 +92,14 b' class TestCompressionParametersHypothesi' | |||
|
83 | 92 | s_strategy, |
|
84 | 93 | ) |
|
85 | 94 | def test_estimated_compression_context_size( |
|
86 | self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy | |
|
95 | self, | |
|
96 | windowlog, | |
|
97 | chainlog, | |
|
98 | hashlog, | |
|
99 | searchlog, | |
|
100 | minmatch, | |
|
101 | targetlength, | |
|
102 | strategy, | |
|
87 | 103 | ): |
|
88 | 104 | if minmatch == zstd.MINMATCH_MIN and strategy in ( |
|
89 | 105 | zstd.STRATEGY_FAST, |
@@ -170,11 +170,15 b' class TestDecompressor_decompress(TestCa' | |||
|
170 | 170 | dctx.decompress(compressed, max_output_size=len(source) - 1) |
|
171 | 171 | |
|
172 | 172 | # Input size + 1 works |
|
173 | decompressed = dctx.decompress(compressed, max_output_size=len(source) + 1) |
|
173 | decompressed = dctx.decompress( | |
|
174 | compressed, max_output_size=len(source) + 1 | |
|
175 | ) | |
|
174 | 176 | self.assertEqual(decompressed, source) |
|
175 | 177 | |
|
176 | 178 | # A much larger buffer works. |
|
177 | decompressed = dctx.decompress(compressed, max_output_size=len(source) * 64) |
|
179 | decompressed = dctx.decompress( | |
|
180 | compressed, max_output_size=len(source) * 64 | |
|
181 | ) | |
|
178 | 182 | self.assertEqual(decompressed, source) |
|
179 | 183 | |
|
180 | 184 | def test_stupidly_large_output_buffer(self): |
@@ -237,7 +241,8 b' class TestDecompressor_decompress(TestCa' | |||
|
237 | 241 | dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN) |
|
238 | 242 | |
|
239 | 243 | with self.assertRaisesRegex( |
|
240 | zstd.ZstdError, "decompression error: Frame requires too much memory" | |
|
244 | zstd.ZstdError, | |
|
245 | "decompression error: Frame requires too much memory", | |
|
241 | 246 | ): |
|
242 | 247 | dctx.decompress(frame, max_output_size=len(source)) |
|
243 | 248 | |
@@ -291,7 +296,9 b' class TestDecompressor_copy_stream(TestC' | |||
|
291 | 296 | self.assertEqual(w, len(source.getvalue())) |
|
292 | 297 | |
|
293 | 298 | def test_read_write_size(self): |
|
294 | source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar")) | |
|
299 | source = OpCountingBytesIO( | |
|
300 | zstd.ZstdCompressor().compress(b"foobarfoobar") | |
|
301 | ) | |
|
295 | 302 | |
|
296 | 303 | dest = OpCountingBytesIO() |
|
297 | 304 | dctx = zstd.ZstdDecompressor() |
@@ -309,7 +316,9 b' class TestDecompressor_stream_reader(Tes' | |||
|
309 | 316 | dctx = zstd.ZstdDecompressor() |
|
310 | 317 | |
|
311 | 318 | with dctx.stream_reader(b"foo") as reader: |
|
312 | with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"): |
|
319 | with self.assertRaisesRegex( | |
|
320 | ValueError, "cannot __enter__ multiple times" | |
|
321 | ): | |
|
313 | 322 | with reader as reader2: |
|
314 | 323 | pass |
|
315 | 324 | |
@@ -474,7 +483,9 b' class TestDecompressor_stream_reader(Tes' | |||
|
474 | 483 | dctx = zstd.ZstdDecompressor() |
|
475 | 484 | |
|
476 | 485 | with dctx.stream_reader(frame) as reader: |
|
477 | with self.assertRaisesRegex(ValueError, "cannot seek to negative position"): |
|
486 | with self.assertRaisesRegex( | |
|
487 | ValueError, "cannot seek to negative position" | |
|
488 | ): | |
|
478 | 489 | reader.seek(-1, os.SEEK_SET) |
|
479 | 490 | |
|
480 | 491 | reader.read(1) |
@@ -490,7 +501,8 b' class TestDecompressor_stream_reader(Tes' | |||
|
490 | 501 | reader.seek(-1, os.SEEK_CUR) |
|
491 | 502 | |
|
492 | 503 | with self.assertRaisesRegex( |
|
493 | ValueError, "zstd decompression streams cannot be seeked with SEEK_END" | |
|
504 | ValueError, | |
|
505 | "zstd decompression streams cannot be seeked with SEEK_END", | |
|
494 | 506 | ): |
|
495 | 507 | reader.seek(0, os.SEEK_END) |
|
496 | 508 | |
@@ -743,7 +755,9 b' class TestDecompressor_stream_reader(Tes' | |||
|
743 | 755 | |
|
744 | 756 | def test_read_lines(self): |
|
745 | 757 | cctx = zstd.ZstdCompressor() |
|
746 | source = b"\n".join(("line %d" % i).encode("ascii") for i in range(1024)) | |
|
758 | source = b"\n".join( | |
|
759 | ("line %d" % i).encode("ascii") for i in range(1024) | |
|
760 | ) | |
|
747 | 761 | |
|
748 | 762 | frame = cctx.compress(source) |
|
749 | 763 | |
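The body of test_read_lines() beyond this context is not shown; a plausible sketch of line-oriented reading over a decompression stream (wrapping the reader in io.TextIOWrapper follows the python-zstandard documentation, but its use in this exact test is an assumption):

    import io

    dctx = zstd.ZstdDecompressor()
    reader = dctx.stream_reader(io.BytesIO(frame))
    # stream_reader objects behave like readable file objects,
    # so they can be wrapped for text/line access.
    text = io.TextIOWrapper(reader, encoding="ascii")
    lines = text.readlines()
    assert len(lines) == 1024  # 1023 newlines join 1024 lines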
@@ -821,7 +835,9 b' class TestDecompressor_decompressobj(Tes' | |||
|
821 | 835 | dobj = dctx.decompressobj() |
|
822 | 836 | dobj.decompress(data) |
|
823 | 837 | |
|
824 | with self.assertRaisesRegex(zstd.ZstdError, "cannot use a decompressobj"): |
|
838 | with self.assertRaisesRegex( | |
|
839 | zstd.ZstdError, "cannot use a decompressobj" | |
|
840 | ): | |
|
825 | 841 | dobj.decompress(data) |
|
826 | 842 | self.assertIsNone(dobj.flush()) |
|
827 | 843 | |
@@ -1124,7 +1140,9 b' class TestDecompressor_read_to_iter(Test' | |||
|
1124 | 1140 | # Buffer protocol works. |
|
1125 | 1141 | dctx.read_to_iter(b"foobar") |
|
1126 | 1142 | |
|
1127 | with self.assertRaisesRegex(ValueError, "must pass an object with a read"): |
|
1143 | with self.assertRaisesRegex( | |
|
1144 | ValueError, "must pass an object with a read" | |
|
1145 | ): | |
|
1128 | 1146 | b"".join(dctx.read_to_iter(True)) |
|
1129 | 1147 | |
|
1130 | 1148 | def test_empty_input(self): |
@@ -1226,7 +1244,9 b' class TestDecompressor_read_to_iter(Test' | |||
|
1226 | 1244 | decompressed = b"".join(chunks) |
|
1227 | 1245 | self.assertEqual(decompressed, source.getvalue()) |
|
1228 | 1246 | |
|
1229 | @unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set") | |
|
1247 | @unittest.skipUnless( | |
|
1248 | "ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set" | |
|
1249 | ) | |
|
1230 | 1250 | def test_large_input(self): |
|
1231 | 1251 | bytes = list(struct.Struct(">B").pack(i) for i in range(256)) |
|
1232 | 1252 | compressed = NonClosingBytesIO() |
@@ -1241,13 +1261,16 b' class TestDecompressor_read_to_iter(Test' | |||
|
1241 | 1261 | len(compressed.getvalue()) |
|
1242 | 1262 | > zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE |
|
1243 | 1263 | ) |
|
1244 | have_raw = input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2 | |
|
1264 | have_raw = ( | |
|
1265 | input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2 | |
|
1266 | ) | |
|
1245 | 1267 | if have_compressed and have_raw: |
|
1246 | 1268 | break |
|
1247 | 1269 | |
|
1248 | 1270 | compressed = io.BytesIO(compressed.getvalue()) |
|
1249 | 1271 | self.assertGreater( |
|
1250 | len(compressed.getvalue()), zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE, |
|
1272 | len(compressed.getvalue()), | |
|
1273 | zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE, | |
|
1251 | 1274 | ) |
|
1252 | 1275 | |
|
1253 | 1276 | dctx = zstd.ZstdDecompressor() |
@@ -1303,7 +1326,9 b' class TestDecompressor_read_to_iter(Test' | |||
|
1303 | 1326 | self.assertEqual(streamed, source.getvalue()) |
|
1304 | 1327 | |
|
1305 | 1328 | def test_read_write_size(self): |
|
1306 | source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar")) | |
|
1329 | source = OpCountingBytesIO( | |
|
1330 | zstd.ZstdCompressor().compress(b"foobarfoobar") | |
|
1331 | ) | |
|
1307 | 1332 | dctx = zstd.ZstdDecompressor() |
|
1308 | 1333 | for chunk in dctx.read_to_iter(source, read_size=1, write_size=1): |
|
1309 | 1334 | self.assertEqual(len(chunk), 1) |
@@ -1355,10 +1380,14 b' class TestDecompressor_content_dict_chai' | |||
|
1355 | 1380 | ): |
|
1356 | 1381 | dctx.decompress_content_dict_chain([zstd.FRAME_HEADER]) |
|
1357 | 1382 | |
|
1358 | with self.assertRaisesRegex(ValueError, "chunk 0 is not a valid zstd frame"): | |
|
1383 | with self.assertRaisesRegex( | |
|
1384 | ValueError, "chunk 0 is not a valid zstd frame" | |
|
1385 | ): | |
|
1359 | 1386 | dctx.decompress_content_dict_chain([b"foo" * 8]) |
|
1360 | 1387 | |
|
1361 | no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64) |
|
1388 | no_size = zstd.ZstdCompressor(write_content_size=False).compress( | |
|
1389 | b"foo" * 64 | |
|
1390 | ) | |
|
1362 | 1391 | |
|
1363 | 1392 | with self.assertRaisesRegex( |
|
1364 | 1393 | ValueError, "chunk 0 missing content size in frame" |
@@ -1389,10 +1418,14 b' class TestDecompressor_content_dict_chai' | |||
|
1389 | 1418 | ): |
|
1390 | 1419 | dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER]) |
|
1391 | 1420 | |
|
1392 | with self.assertRaisesRegex(ValueError, "chunk 1 is not a valid zstd frame"): | |
|
1421 | with self.assertRaisesRegex( | |
|
1422 | ValueError, "chunk 1 is not a valid zstd frame" | |
|
1423 | ): | |
|
1393 | 1424 | dctx.decompress_content_dict_chain([initial, b"foo" * 8]) |
|
1394 | 1425 | |
|
1395 | no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64) |
|
1426 | no_size = zstd.ZstdCompressor(write_content_size=False).compress( | |
|
1427 | b"foo" * 64 | |
|
1428 | ) | |
|
1396 | 1429 | |
|
1397 | 1430 | with self.assertRaisesRegex( |
|
1398 | 1431 | ValueError, "chunk 1 missing content size in frame" |
@@ -1400,7 +1433,9 b' class TestDecompressor_content_dict_chai' | |||
|
1400 | 1433 | dctx.decompress_content_dict_chain([initial, no_size]) |
|
1401 | 1434 | |
|
1402 | 1435 | # Corrupt second frame. |
|
1403 | cctx = zstd.ZstdCompressor(dict_data=zstd.ZstdCompressionDict(b"foo" * 64)) | |
|
1436 | cctx = zstd.ZstdCompressor( | |
|
1437 | dict_data=zstd.ZstdCompressionDict(b"foo" * 64) | |
|
1438 | ) | |
|
1404 | 1439 | frame = cctx.compress(b"bar" * 64) |
|
1405 | 1440 | frame = frame[0:12] + frame[15:] |
|
1406 | 1441 | |
@@ -1447,7 +1482,9 b' class TestDecompressor_multi_decompress_' | |||
|
1447 | 1482 | with self.assertRaises(TypeError): |
|
1448 | 1483 | dctx.multi_decompress_to_buffer((1, 2)) |
|
1449 | 1484 | |
|
1450 | with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"): |
|
1485 | with self.assertRaisesRegex( | |
|
1486 | TypeError, "item 0 not a bytes like object" | |
|
1487 | ): | |
|
1451 | 1488 | dctx.multi_decompress_to_buffer([u"foo"]) |
|
1452 | 1489 | |
|
1453 | 1490 | with self.assertRaisesRegex( |
@@ -1491,7 +1528,9 b' class TestDecompressor_multi_decompress_' | |||
|
1491 | 1528 | if not hasattr(dctx, "multi_decompress_to_buffer"): |
|
1492 | 1529 | self.skipTest("multi_decompress_to_buffer not available") |
|
1493 | 1530 | |
|
1494 | result = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes) |
|
1531 | result = dctx.multi_decompress_to_buffer( | |
|
1532 | frames, decompressed_sizes=sizes | |
|
1533 | ) | |
|
1495 | 1534 | |
|
1496 | 1535 | self.assertEqual(len(result), len(frames)) |
|
1497 | 1536 | self.assertEqual(result.size(), sum(map(len, original))) |
@@ -1582,10 +1621,15 b' class TestDecompressor_multi_decompress_' | |||
|
1582 | 1621 | # And a manual mode. |
|
1583 | 1622 | b = b"".join([frames[0].tobytes(), frames[1].tobytes()]) |
|
1584 | 1623 | b1 = zstd.BufferWithSegments( |
|
1585 | b, struct.pack("=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1])) | |
|
1624 | b, | |
|
1625 | struct.pack( | |
|
1626 | "=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1]) | |
|
1627 | ), | |
|
1586 | 1628 | ) |
|
1587 | 1629 | |
|
1588 | b = b"".join([frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()]) | |
|
1630 | b = b"".join( | |
|
1631 | [frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()] | |
|
1632 | ) | |
|
1589 | 1633 | b2 = zstd.BufferWithSegments( |
|
1590 | 1634 | b, |
|
1591 | 1635 | struct.pack( |
@@ -196,7 +196,9 b' class TestDecompressor_stream_reader_fuz' | |||
|
196 | 196 | streaming=strategies.booleans(), |
|
197 | 197 | source_read_size=strategies.integers(1, 1048576), |
|
198 | 198 | ) |
|
199 | def test_stream_source_readall(self, original, level, streaming, source_read_size): | |
|
199 | def test_stream_source_readall( | |
|
200 | self, original, level, streaming, source_read_size | |
|
201 | ): | |
|
200 | 202 | cctx = zstd.ZstdCompressor(level=level) |
|
201 | 203 | |
|
202 | 204 | if streaming: |
@@ -398,7 +400,9 b' class TestDecompressor_stream_writer_fuz' | |||
|
398 | 400 | write_size=strategies.integers(min_value=1, max_value=8192), |
|
399 | 401 | input_sizes=strategies.data(), |
|
400 | 402 | ) |
|
401 | def test_write_size_variance(self, original, level, write_size, input_sizes): | |
|
403 | def test_write_size_variance( | |
|
404 | self, original, level, write_size, input_sizes | |
|
405 | ): | |
|
402 | 406 | cctx = zstd.ZstdCompressor(level=level) |
|
403 | 407 | frame = cctx.compress(original) |
|
404 | 408 | |
@@ -433,7 +437,9 b' class TestDecompressor_copy_stream_fuzzi' | |||
|
433 | 437 | read_size=strategies.integers(min_value=1, max_value=8192), |
|
434 | 438 | write_size=strategies.integers(min_value=1, max_value=8192), |
|
435 | 439 | ) |
|
436 | def test_read_write_size_variance(self, original, level, read_size, write_size): |
|
440 | def test_read_write_size_variance( | |
|
441 | self, original, level, read_size, write_size | |
|
442 | ): | |
|
437 | 443 | cctx = zstd.ZstdCompressor(level=level) |
|
438 | 444 | frame = cctx.compress(original) |
|
439 | 445 | |
@@ -441,7 +447,9 b' class TestDecompressor_copy_stream_fuzzi' | |||
|
441 | 447 | dest = io.BytesIO() |
|
442 | 448 | |
|
443 | 449 | dctx = zstd.ZstdDecompressor() |
|
444 | dctx.copy_stream(source, dest, read_size=read_size, write_size=write_size) | |
|
450 | dctx.copy_stream( | |
|
451 | source, dest, read_size=read_size, write_size=write_size | |
|
452 | ) | |
|
445 | 453 | |
|
446 | 454 | self.assertEqual(dest.getvalue(), original) |
|
447 | 455 | |
@@ -490,11 +498,14 b' class TestDecompressor_decompressobj_fuz' | |||
|
490 | 498 | original=strategies.sampled_from(random_input_data()), |
|
491 | 499 | level=strategies.integers(min_value=1, max_value=5), |
|
492 | 500 | write_size=strategies.integers( |
|
493 | min_value=1, max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
501 | min_value=1, | |
|
502 | max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, | |
|
494 | 503 | ), |
|
495 | 504 | chunk_sizes=strategies.data(), |
|
496 | 505 | ) |
|
497 | def test_random_output_sizes(self, original, level, write_size, chunk_sizes): | |
|
506 | def test_random_output_sizes( | |
|
507 | self, original, level, write_size, chunk_sizes | |
|
508 | ): | |
|
498 | 509 | cctx = zstd.ZstdCompressor(level=level) |
|
499 | 510 | frame = cctx.compress(original) |
|
500 | 511 | |
@@ -524,7 +535,9 b' class TestDecompressor_read_to_iter_fuzz' | |||
|
524 | 535 | read_size=strategies.integers(min_value=1, max_value=4096), |
|
525 | 536 | write_size=strategies.integers(min_value=1, max_value=4096), |
|
526 | 537 | ) |
|
527 | def test_read_write_size_variance(self, original, level, read_size, write_size): |
|
538 | def test_read_write_size_variance( | |
|
539 | self, original, level, read_size, write_size | |
|
540 | ): | |
|
528 | 541 | cctx = zstd.ZstdCompressor(level=level) |
|
529 | 542 | frame = cctx.compress(original) |
|
530 | 543 | |
@@ -532,7 +545,9 b' class TestDecompressor_read_to_iter_fuzz' | |||
|
532 | 545 | |
|
533 | 546 | dctx = zstd.ZstdDecompressor() |
|
534 | 547 | chunks = list( |
|
535 | dctx.read_to_iter(source, read_size=read_size, write_size=write_size) | |
|
548 | dctx.read_to_iter( | |
|
549 | source, read_size=read_size, write_size=write_size | |
|
550 | ) | |
|
536 | 551 | ) |
|
537 | 552 | |
|
538 | 553 | self.assertEqual(b"".join(chunks), original) |
@@ -542,7 +557,9 b' class TestDecompressor_read_to_iter_fuzz' | |||
|
542 | 557 | class TestDecompressor_multi_decompress_to_buffer_fuzzing(TestCase): |
|
543 | 558 | @hypothesis.given( |
|
544 | 559 | original=strategies.lists( |
|
545 | strategies.sampled_from(random_input_data()), min_size=1, max_size=1024, |
|
560 | strategies.sampled_from(random_input_data()), | |
|
561 | min_size=1, | |
|
562 | max_size=1024, | |
|
546 | 563 | ), |
|
547 | 564 | threads=strategies.integers(min_value=1, max_value=8), |
|
548 | 565 | use_dict=strategies.booleans(), |
@@ -51,11 +51,15 b' class TestTrainDictionary(TestCase):' | |||
|
51 | 51 | self.assertEqual(d.d, 16) |
|
52 | 52 | |
|
53 | 53 | def test_set_dict_id(self): |
|
54 | d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16, dict_id=42) | |
|
54 | d = zstd.train_dictionary( | |
|
55 | 8192, generate_samples(), k=64, d=16, dict_id=42 | |
|
56 | ) | |
|
55 | 57 | self.assertEqual(d.dict_id(), 42) |
|
56 | 58 | |
|
57 | 59 | def test_optimize(self): |
|
58 | d = zstd.train_dictionary(8192, generate_samples(), threads=-1, steps=1, d=16) | |
|
60 | d = zstd.train_dictionary( | |
|
61 | 8192, generate_samples(), threads=-1, steps=1, d=16 | |
|
62 | ) | |
|
59 | 63 | |
|
60 | 64 | # This varies by platform. |
|
61 | 65 | self.assertIn(d.k, (50, 2000)) |
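For context, the trained dictionary object plugs into compressors and decompressors via dict_data; a minimal sketch reusing the test's parameters (generate_samples() is the test suite's sample helper, and the round trip itself is not part of this diff):

    import zstd

    # k and d are cover-algorithm parameters; dict_id tags the dictionary.
    d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16, dict_id=42)

    cctx = zstd.ZstdCompressor(dict_data=d)
    frame = cctx.compress(b"foo" * 64)

    # Decompression needs the same dictionary.
    dctx = zstd.ZstdDecompressor(dict_data=d)
    assert dctx.decompress(frame) == b"foo" * 64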
@@ -71,10 +75,14 b' class TestCompressionDict(TestCase):' | |||
|
71 | 75 | def test_bad_precompute_compress(self): |
|
72 | 76 | d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16) |
|
73 | 77 | |
|
74 | with self.assertRaisesRegex(ValueError, "must specify one of level or "): |
|
78 | with self.assertRaisesRegex( | |
|
79 | ValueError, "must specify one of level or " | |
|
80 | ): | |
|
75 | 81 | d.precompute_compress() |
|
76 | 82 | |
|
77 | with self.assertRaisesRegex(ValueError, "must only specify one of level or "): | |
|
83 | with self.assertRaisesRegex( | |
|
84 | ValueError, "must only specify one of level or " | |
|
85 | ): | |
|
78 | 86 | d.precompute_compress( |
|
79 | 87 | level=3, compression_params=zstd.CompressionParameters() |
|
80 | 88 | ) |
@@ -88,5 +96,7 b' class TestCompressionDict(TestCase):' | |||
|
88 | 96 | d = zstd.ZstdCompressionDict( |
|
89 | 97 | b"dictcontent" * 64, dict_type=zstd.DICT_TYPE_FULLDICT |
|
90 | 98 | ) |
|
91 | with self.assertRaisesRegex(zstd.ZstdError, "unable to precompute dictionary"): | |
|
99 | with self.assertRaisesRegex( | |
|
100 | zstd.ZstdError, "unable to precompute dictionary" | |
|
101 | ): | |
|
92 | 102 | d.precompute_compress(level=1) |
@@ -299,10 +299,14 b' class ZstdCompressionParameters(object):' | |||
|
299 | 299 | _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log) |
|
300 | 300 | _set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log) |
|
301 | 301 | _set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match) |
|
302 | _set_compression_parameter(params, lib.ZSTD_c_targetLength, target_length) | |
|
302 | _set_compression_parameter( | |
|
303 | params, lib.ZSTD_c_targetLength, target_length | |
|
304 | ) | |
|
303 | 305 | |
|
304 | 306 | if strategy != -1 and compression_strategy != -1: |
|
305 | raise ValueError("cannot specify both compression_strategy and strategy") | |
|
307 | raise ValueError( | |
|
308 | "cannot specify both compression_strategy and strategy" | |
|
309 | ) | |
|
306 | 310 | |
|
307 | 311 | if compression_strategy != -1: |
|
308 | 312 | strategy = compression_strategy |
@@ -313,12 +317,16 b' class ZstdCompressionParameters(object):' | |||
|
313 | 317 | _set_compression_parameter( |
|
314 | 318 | params, lib.ZSTD_c_contentSizeFlag, write_content_size |
|
315 | 319 | ) |
|
316 | _set_compression_parameter(params, lib.ZSTD_c_checksumFlag, write_checksum) | |
|
320 | _set_compression_parameter( | |
|
321 | params, lib.ZSTD_c_checksumFlag, write_checksum | |
|
322 | ) | |
|
317 | 323 | _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id) |
|
318 | 324 | _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size) |
|
319 | 325 | |
|
320 | 326 | if overlap_log != -1 and overlap_size_log != -1: |
|
321 | raise ValueError("cannot specify both overlap_log and overlap_size_log") | |
|
327 | raise ValueError( | |
|
328 | "cannot specify both overlap_log and overlap_size_log" | |
|
329 | ) | |
|
322 | 330 | |
|
323 | 331 | if overlap_size_log != -1: |
|
324 | 332 | overlap_log = overlap_size_log |
@@ -326,12 +334,16 b' class ZstdCompressionParameters(object):' | |||
|
326 | 334 | overlap_log = 0 |
|
327 | 335 | |
|
328 | 336 | _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log) |
|
329 | _set_compression_parameter(params, lib.ZSTD_c_forceMaxWindow, force_max_window) | |
|
337 | _set_compression_parameter( | |
|
338 | params, lib.ZSTD_c_forceMaxWindow, force_max_window | |
|
339 | ) | |
|
330 | 340 | _set_compression_parameter( |
|
331 | 341 | params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm |
|
332 | 342 | ) |
|
333 | 343 | _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log) |
|
334 | _set_compression_parameter(params, lib.ZSTD_c_ldmMinMatch, ldm_min_match) |
|
344 | _set_compression_parameter( | |
|
345 | params, lib.ZSTD_c_ldmMinMatch, ldm_min_match | |
|
346 | ) | |
|
335 | 347 | _set_compression_parameter( |
|
336 | 348 | params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log |
|
337 | 349 | ) |
@@ -346,7 +358,9 b' class ZstdCompressionParameters(object):' | |||
|
346 | 358 | elif ldm_hash_rate_log == -1: |
|
347 | 359 | ldm_hash_rate_log = 0 |
|
348 | 360 | |
|
349 | _set_compression_parameter(params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log) | |
|
361 | _set_compression_parameter( | |
|
362 | params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log | |
|
363 | ) | |
|
350 | 364 | |
|
351 | 365 | @property |
|
352 | 366 | def format(self): |
@@ -354,7 +368,9 b' class ZstdCompressionParameters(object):' | |||
|
354 | 368 | |
|
355 | 369 | @property |
|
356 | 370 | def compression_level(self): |
|
357 | return _get_compression_parameter(self._params, lib.ZSTD_c_compressionLevel) |
|
371 | return _get_compression_parameter( | |
|
372 | self._params, lib.ZSTD_c_compressionLevel | |
|
373 | ) | |
|
358 | 374 | |
|
359 | 375 | @property |
|
360 | 376 | def window_log(self): |
@@ -386,7 +402,9 b' class ZstdCompressionParameters(object):' | |||
|
386 | 402 | |
|
387 | 403 | @property |
|
388 | 404 | def write_content_size(self): |
|
389 | return _get_compression_parameter(self._params, lib.ZSTD_c_contentSizeFlag) |
|
405 | return _get_compression_parameter( | |
|
406 | self._params, lib.ZSTD_c_contentSizeFlag | |
|
407 | ) | |
|
390 | 408 | |
|
391 | 409 | @property |
|
392 | 410 | def write_checksum(self): |
@@ -410,7 +428,9 b' class ZstdCompressionParameters(object):' | |||
|
410 | 428 | |
|
411 | 429 | @property |
|
412 | 430 | def force_max_window(self): |
|
413 | return _get_compression_parameter(self._params, lib.ZSTD_c_forceMaxWindow) |
|
431 | return _get_compression_parameter( | |
|
432 | self._params, lib.ZSTD_c_forceMaxWindow | |
|
433 | ) | |
|
414 | 434 | |
|
415 | 435 | @property |
|
416 | 436 | def enable_ldm(self): |
@@ -428,11 +448,15 b' class ZstdCompressionParameters(object):' | |||
|
428 | 448 | |
|
429 | 449 | @property |
|
430 | 450 | def ldm_bucket_size_log(self): |
|
431 | return _get_compression_parameter(self._params, lib.ZSTD_c_ldmBucketSizeLog) |
|
451 | return _get_compression_parameter( | |
|
452 | self._params, lib.ZSTD_c_ldmBucketSizeLog | |
|
453 | ) | |
|
432 | 454 | |
|
433 | 455 | @property |
|
434 | 456 | def ldm_hash_rate_log(self): |
|
435 | return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashRateLog) |
|
457 | return _get_compression_parameter( | |
|
458 | self._params, lib.ZSTD_c_ldmHashRateLog | |
|
459 | ) | |
|
436 | 460 | |
|
437 | 461 | @property |
|
438 | 462 | def ldm_hash_every_log(self): |
@@ -457,7 +481,8 b' def _set_compression_parameter(params, p' | |||
|
457 | 481 | zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value) |
|
458 | 482 | if lib.ZSTD_isError(zresult): |
|
459 | 483 | raise ZstdError( |
|
460 | "unable to set compression context parameter: %s" % _zstd_error(zresult) |
|
484 | "unable to set compression context parameter: %s" | |
|
485 | % _zstd_error(zresult) | |
|
461 | 486 | ) |
|
462 | 487 | |
|
463 | 488 | |
@@ -467,14 +492,17 b' def _get_compression_parameter(params, p' | |||
|
467 | 492 | zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result) |
|
468 | 493 | if lib.ZSTD_isError(zresult): |
|
469 | 494 | raise ZstdError( |
|
470 | "unable to get compression context parameter: %s" % _zstd_error(zresult) |
|
495 | "unable to get compression context parameter: %s" | |
|
496 | % _zstd_error(zresult) | |
|
471 | 497 | ) |
|
472 | 498 | |
|
473 | 499 | return result[0] |
|
474 | 500 | |
|
475 | 501 | |
|
476 | 502 | class ZstdCompressionWriter(object): |
|
477 | def __init__(self, compressor, writer, source_size, write_size, write_return_read): | |
|
503 | def __init__( | |
|
504 | self, compressor, writer, source_size, write_size, write_return_read | |
|
505 | ): | |
|
478 | 506 | self._compressor = compressor |
|
479 | 507 | self._writer = writer |
|
480 | 508 | self._write_size = write_size |
@@ -491,7 +519,9 b' class ZstdCompressionWriter(object):' | |||
|
491 | 519 | |
|
492 | 520 | zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size) |
|
493 | 521 | if lib.ZSTD_isError(zresult): |
|
494 | raise ZstdError("error setting source size: %s" % _zstd_error(zresult)) | |
|
522 | raise ZstdError( | |
|
523 | "error setting source size: %s" % _zstd_error(zresult) | |
|
524 | ) | |
|
495 | 525 | |
|
496 | 526 | def __enter__(self): |
|
497 | 527 | if self._closed: |
@@ -595,13 +625,20 b' class ZstdCompressionWriter(object):' | |||
|
595 | 625 | |
|
596 | 626 | while in_buffer.pos < in_buffer.size: |
|
597 | 627 | zresult = lib.ZSTD_compressStream2( |
|
598 | self._compressor._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue, |
|
628 | self._compressor._cctx, | |
|
629 | out_buffer, | |
|
630 | in_buffer, | |
|
631 | lib.ZSTD_e_continue, | |
|
599 | 632 | ) |
|
600 | 633 | if lib.ZSTD_isError(zresult): |
|
601 | raise ZstdError("zstd compress error: %s" % _zstd_error(zresult)) | |
|
634 | raise ZstdError( | |
|
635 | "zstd compress error: %s" % _zstd_error(zresult) | |
|
636 | ) | |
|
602 | 637 | |
|
603 | 638 | if out_buffer.pos: |
|
604 | self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:]) |
|
639 | self._writer.write( | |
|
640 | ffi.buffer(out_buffer.dst, out_buffer.pos)[:] | |
|
641 | ) | |
|
605 | 642 | total_write += out_buffer.pos |
|
606 | 643 | self._bytes_compressed += out_buffer.pos |
|
607 | 644 | out_buffer.pos = 0 |
@@ -637,10 +674,14 b' class ZstdCompressionWriter(object):' | |||
|
637 | 674 | self._compressor._cctx, out_buffer, in_buffer, flush |
|
638 | 675 | ) |
|
639 | 676 | if lib.ZSTD_isError(zresult): |
|
640 | raise ZstdError("zstd compress error: %s" % _zstd_error(zresult)) | |
|
677 | raise ZstdError( | |
|
678 | "zstd compress error: %s" % _zstd_error(zresult) | |
|
679 | ) | |
|
641 | 680 | |
|
642 | 681 | if out_buffer.pos: |
|
643 | self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:]) |
|
682 | self._writer.write( | |
|
683 | ffi.buffer(out_buffer.dst, out_buffer.pos)[:] | |
|
684 | ) | |
|
644 | 685 | total_write += out_buffer.pos |
|
645 | 686 | self._bytes_compressed += out_buffer.pos |
|
646 | 687 | out_buffer.pos = 0 |
@@ -672,7 +713,9 b' class ZstdCompressionObj(object):' | |||
|
672 | 713 | self._compressor._cctx, self._out, source, lib.ZSTD_e_continue |
|
673 | 714 | ) |
|
674 | 715 | if lib.ZSTD_isError(zresult): |
|
675 | raise ZstdError("zstd compress error: %s" % _zstd_error(zresult)) | |
|
716 | raise ZstdError( | |
|
717 | "zstd compress error: %s" % _zstd_error(zresult) | |
|
718 | ) | |
|
676 | 719 | |
|
677 | 720 | if self._out.pos: |
|
678 | 721 | chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:]) |
@@ -681,7 +724,10 b' class ZstdCompressionObj(object):' | |||
|
681 | 724 | return b"".join(chunks) |
|
682 | 725 | |
|
683 | 726 | def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH): |
|
684 | if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK): | |
|
727 | if flush_mode not in ( | |
|
728 | COMPRESSOBJ_FLUSH_FINISH, | |
|
729 | COMPRESSOBJ_FLUSH_BLOCK, | |
|
730 | ): | |
|
685 | 731 | raise ValueError("flush mode not recognized") |
|
686 | 732 | |
|
687 | 733 | if self._finished: |
@@ -768,7 +814,9 b' class ZstdCompressionChunker(object):' | |||
|
768 | 814 | self._in.pos = 0 |
|
769 | 815 | |
|
770 | 816 | if lib.ZSTD_isError(zresult): |
|
771 | raise ZstdError("zstd compress error: %s" % _zstd_error(zresult)) | |
|
817 | raise ZstdError( | |
|
818 | "zstd compress error: %s" % _zstd_error(zresult) | |
|
819 | ) | |
|
772 | 820 | |
|
773 | 821 | if self._out.pos == self._out.size: |
|
774 | 822 | yield ffi.buffer(self._out.dst, self._out.pos)[:] |
@@ -780,7 +828,8 b' class ZstdCompressionChunker(object):' | |||
|
780 | 828 | |
|
781 | 829 | if self._in.src != ffi.NULL: |
|
782 | 830 | raise ZstdError( |
|
783 | "cannot call flush() before consuming output from " "previous operation" |
|
831 | "cannot call flush() before consuming output from " | |
|
832 | "previous operation" | |
|
784 | 833 | ) |
|
785 | 834 | |
|
786 | 835 | while True: |
@@ -788,7 +837,9 b' class ZstdCompressionChunker(object):' | |||
|
788 | 837 | self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush |
|
789 | 838 | ) |
|
790 | 839 | if lib.ZSTD_isError(zresult): |
|
791 | raise ZstdError("zstd compress error: %s" % _zstd_error(zresult)) | |
|
840 | raise ZstdError( | |
|
841 | "zstd compress error: %s" % _zstd_error(zresult) | |
|
842 | ) | |
|
792 | 843 | |
|
793 | 844 | if self._out.pos: |
|
794 | 845 | yield ffi.buffer(self._out.dst, self._out.pos)[:] |
@@ -812,7 +863,9 b' class ZstdCompressionChunker(object):' | |||
|
812 | 863 | self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end |
|
813 | 864 | ) |
|
814 | 865 | if lib.ZSTD_isError(zresult): |
|
815 | raise ZstdError("zstd compress error: %s" % _zstd_error(zresult)) | |
|
866 | raise ZstdError( | |
|
867 | "zstd compress error: %s" % _zstd_error(zresult) | |
|
868 | ) | |
|
816 | 869 | |
|
817 | 870 | if self._out.pos: |
|
818 | 871 | yield ffi.buffer(self._out.dst, self._out.pos)[:] |
@@ -939,7 +992,10 b' class ZstdCompressionReader(object):' | |||
|
939 | 992 | old_pos = out_buffer.pos |
|
940 | 993 | |
|
941 | 994 | zresult = lib.ZSTD_compressStream2( |
|
942 | self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_continue | |
|
995 | self._compressor._cctx, | |
|
996 | out_buffer, | |
|
997 | self._in_buffer, | |
|
998 | lib.ZSTD_e_continue, | |
|
943 | 999 | ) |
|
944 | 1000 | |
|
945 | 1001 | self._bytes_compressed += out_buffer.pos - old_pos |
@@ -997,7 +1053,9 b' class ZstdCompressionReader(object):' | |||
|
997 | 1053 | self._bytes_compressed += out_buffer.pos - old_pos |
|
998 | 1054 | |
|
999 | 1055 | if lib.ZSTD_isError(zresult): |
|
1000 | raise ZstdError("error ending compression stream: %s", _zstd_error(zresult)) | |
|
1056 | raise ZstdError( | |
|
1057 | "error ending compression stream: %s", _zstd_error(zresult) | |
|
1058 | ) | |
|
1001 | 1059 | |
|
1002 | 1060 | if zresult == 0: |
|
1003 | 1061 | self._finished_output = True |
@@ -1102,7 +1160,9 b' class ZstdCompressionReader(object):' | |||
|
1102 | 1160 | self._bytes_compressed += out_buffer.pos - old_pos |
|
1103 | 1161 | |
|
1104 | 1162 | if lib.ZSTD_isError(zresult): |
|
1105 | raise ZstdError("error ending compression stream: %s", _zstd_error(zresult)) | |
|
1163 | raise ZstdError( | |
|
1164 | "error ending compression stream: %s", _zstd_error(zresult) | |
|
1165 | ) | |
|
1106 | 1166 | |
|
1107 | 1167 | if zresult == 0: |
|
1108 | 1168 | self._finished_output = True |
@@ -1170,13 +1230,17 b' class ZstdCompressor(object):' | |||
|
1170 | 1230 | threads=0, |
|
1171 | 1231 | ): |
|
1172 | 1232 | if level > lib.ZSTD_maxCLevel(): |
|
1173 | raise ValueError("level must be less than %d" % lib.ZSTD_maxCLevel()) | |
|
1233 | raise ValueError( | |
|
1234 | "level must be less than %d" % lib.ZSTD_maxCLevel() | |
|
1235 | ) | |
|
1174 | 1236 | |
|
1175 | 1237 | if threads < 0: |
|
1176 | 1238 | threads = _cpu_count() |
|
1177 | 1239 | |
|
1178 | 1240 | if compression_params and write_checksum is not None: |
|
1179 | raise ValueError("cannot define compression_params and " "write_checksum") | |
|
1241 | raise ValueError( | |
|
1242 | "cannot define compression_params and " "write_checksum" | |
|
1243 | ) | |
|
1180 | 1244 | |
|
1181 | 1245 | if compression_params and write_content_size is not None: |
|
1182 | 1246 | raise ValueError( |
@@ -1184,7 +1248,9 b' class ZstdCompressor(object):' | |||
|
1184 | 1248 | ) |
|
1185 | 1249 | |
|
1186 | 1250 | if compression_params and write_dict_id is not None: |
|
1187 | raise ValueError("cannot define compression_params and " "write_dict_id") | |
|
1251 | raise ValueError( | |
|
1252 | "cannot define compression_params and " "write_dict_id" | |
|
1253 | ) | |
|
1188 | 1254 | |
|
1189 | 1255 | if compression_params and threads: |
|
1190 | 1256 | raise ValueError("cannot define compression_params and threads") |
@@ -1201,7 +1267,9 b' class ZstdCompressor(object):' | |||
|
1201 | 1267 | |
|
1202 | 1268 | self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams) |
|
1203 | 1269 | |
|
1204 | _set_compression_parameter(self._params, lib.ZSTD_c_compressionLevel, level) |
|
1270 | _set_compression_parameter( | |
|
1271 | self._params, lib.ZSTD_c_compressionLevel, level | |
|
1272 | ) | |
|
1205 | 1273 | |
|
1206 | 1274 | _set_compression_parameter( |
|
1207 | 1275 | self._params, |
@@ -1210,7 +1278,9 b' class ZstdCompressor(object):' | |||
|
1210 | 1278 | ) |
|
1211 | 1279 | |
|
1212 | 1280 | _set_compression_parameter( |
|
1213 | self._params, lib.ZSTD_c_checksumFlag, 1 if write_checksum else 0 | |
|
1281 | self._params, | |
|
1282 | lib.ZSTD_c_checksumFlag, | |
|
1283 | 1 if write_checksum else 0, | |
|
1214 | 1284 | ) |
|
1215 | 1285 | |
|
1216 | 1286 | _set_compression_parameter( |
@@ -1218,7 +1288,9 b' class ZstdCompressor(object):' | |||
|
1218 | 1288 | ) |
|
1219 | 1289 | |
|
1220 | 1290 | if threads: |
|
1221 | _set_compression_parameter(self._params, lib.ZSTD_c_nbWorkers, threads) |
|
1291 | _set_compression_parameter( | |
|
1292 | self._params, lib.ZSTD_c_nbWorkers, threads | |
|
1293 | ) | |
|
1222 | 1294 | |
|
1223 | 1295 | cctx = lib.ZSTD_createCCtx() |
|
1224 | 1296 | if cctx == ffi.NULL: |
@@ -1237,10 +1309,13 b' class ZstdCompressor(object):' | |||
|
1237 | 1309 | ) |
|
1238 | 1310 | |
|
1239 | 1311 | def _setup_cctx(self): |
|
1240 | zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(self._cctx, self._params) |
|
1312 | zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams( | |
|
1313 | self._cctx, self._params | |
|
1314 | ) | |
|
1241 | 1315 | if lib.ZSTD_isError(zresult): |
|
1242 | 1316 | raise ZstdError( |
|
1243 | "could not set compression parameters: %s" % _zstd_error(zresult) |
|
1317 | "could not set compression parameters: %s" | |
|
1318 | % _zstd_error(zresult) | |
|
1244 | 1319 | ) |
|
1245 | 1320 | |
|
1246 | 1321 | dict_data = self._dict_data |
@@ -1259,7 +1334,8 b' class ZstdCompressor(object):' | |||
|
1259 | 1334 | |
|
1260 | 1335 | if lib.ZSTD_isError(zresult): |
|
1261 | 1336 | raise ZstdError( |
|
1262 | "could not load compression dictionary: %s" % _zstd_error(zresult) |
|
1337 | "could not load compression dictionary: %s" | |
|
1338 | % _zstd_error(zresult) | |
|
1263 | 1339 | ) |
|
1264 | 1340 | |
|
1265 | 1341 | def memory_size(self): |
@@ -1275,7 +1351,9 b' class ZstdCompressor(object):' | |||
|
1275 | 1351 | |
|
1276 | 1352 | zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer)) |
|
1277 | 1353 | if lib.ZSTD_isError(zresult): |
|
1278 | raise ZstdError("error setting source size: %s" % _zstd_error(zresult)) | |
|
1354 | raise ZstdError( | |
|
1355 | "error setting source size: %s" % _zstd_error(zresult) | |
|
1356 | ) | |
|
1279 | 1357 | |
|
1280 | 1358 | out_buffer = ffi.new("ZSTD_outBuffer *") |
|
1281 | 1359 | in_buffer = ffi.new("ZSTD_inBuffer *") |
@@ -1307,11 +1385,15 b' class ZstdCompressor(object):' | |||
|
1307 | 1385 | |
|
1308 | 1386 | zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) |
|
1309 | 1387 | if lib.ZSTD_isError(zresult): |
|
1310 | raise ZstdError("error setting source size: %s" % _zstd_error(zresult)) | |
|
1388 | raise ZstdError( | |
|
1389 | "error setting source size: %s" % _zstd_error(zresult) | |
|
1390 | ) | |
|
1311 | 1391 | |
|
1312 | 1392 | cobj = ZstdCompressionObj() |
|
1313 | 1393 | cobj._out = ffi.new("ZSTD_outBuffer *") |
|
1314 | cobj._dst_buffer = ffi.new("char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE) |
|
1394 | cobj._dst_buffer = ffi.new( | |
|
1395 | "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE | |
|
1396 | ) | |
|
1315 | 1397 | cobj._out.dst = cobj._dst_buffer |
|
1316 | 1398 | cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE |
|
1317 | 1399 | cobj._out.pos = 0 |
@@ -1328,7 +1410,9 b' class ZstdCompressor(object):' | |||
|
1328 | 1410 | |
|
1329 | 1411 | zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) |
|
1330 | 1412 | if lib.ZSTD_isError(zresult): |
|
1331 | raise ZstdError("error setting source size: %s" % _zstd_error(zresult)) | |
|
1413 | raise ZstdError( | |
|
1414 | "error setting source size: %s" % _zstd_error(zresult) | |
|
1415 | ) | |
|
1332 | 1416 | |
|
1333 | 1417 | return ZstdCompressionChunker(self, chunk_size=chunk_size) |
|
1334 | 1418 | |
@@ -1353,7 +1437,9 b' class ZstdCompressor(object):' | |||
|
1353 | 1437 | |
|
1354 | 1438 | zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) |
|
1355 | 1439 | if lib.ZSTD_isError(zresult): |
|
1356 | raise ZstdError("error setting source size: %s" % _zstd_error(zresult)) | |
|
1440 | raise ZstdError( | |
|
1441 | "error setting source size: %s" % _zstd_error(zresult) | |
|
1442 | ) | |
|
1357 | 1443 | |
|
1358 | 1444 | in_buffer = ffi.new("ZSTD_inBuffer *") |
|
1359 | 1445 | out_buffer = ffi.new("ZSTD_outBuffer *") |
@@ -1381,7 +1467,9 b' class ZstdCompressor(object):' | |||
|
1381 | 1467 | self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue |
|
1382 | 1468 | ) |
|
1383 | 1469 | if lib.ZSTD_isError(zresult): |
|
1384 | raise ZstdError("zstd compress error: %s" % _zstd_error(zresult)) | |
|
1470 | raise ZstdError( | |
|
1471 | "zstd compress error: %s" % _zstd_error(zresult) | |
|
1472 | ) | |
|
1385 | 1473 | |
|
1386 | 1474 | if out_buffer.pos: |
|
1387 | 1475 | ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos)) |
@@ -1423,7 +1511,9 b' class ZstdCompressor(object):' | |||
|
1423 | 1511 | |
|
1424 | 1512 | zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) |
|
1425 | 1513 | if lib.ZSTD_isError(zresult): |
|
1426 | raise ZstdError("error setting source size: %s" % _zstd_error(zresult)) | |
|
1514 | raise ZstdError( | |
|
1515 | "error setting source size: %s" % _zstd_error(zresult) | |
|
1516 | ) | |
|
1427 | 1517 | |
|
1428 | 1518 | return ZstdCompressionReader(self, source, read_size) |
|
1429 | 1519 | |
@@ -1443,7 +1533,9 b' class ZstdCompressor(object):' | |||
|
1443 | 1533 | if size < 0: |
|
1444 | 1534 | size = lib.ZSTD_CONTENTSIZE_UNKNOWN |
|
1445 | 1535 | |
|
1446 | return ZstdCompressionWriter(self, writer, size, write_size, write_return_read) | |
|
1536 | return ZstdCompressionWriter( | |
|
1537 | self, writer, size, write_size, write_return_read | |
|
1538 | ) | |
|
1447 | 1539 | |
|
1448 | 1540 | write_to = stream_writer |
|
1449 | 1541 | |
@@ -1473,7 +1565,9 b' class ZstdCompressor(object):' | |||
|
1473 | 1565 | |
|
1474 | 1566 | zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) |
|
1475 | 1567 | if lib.ZSTD_isError(zresult): |
|
1476 | raise ZstdError("error setting source size: %s" % _zstd_error(zresult)) | |
|
1568 | raise ZstdError( | |
|
1569 | "error setting source size: %s" % _zstd_error(zresult) | |
|
1570 | ) | |
|
1477 | 1571 | |
|
1478 | 1572 | in_buffer = ffi.new("ZSTD_inBuffer *") |
|
1479 | 1573 | out_buffer = ffi.new("ZSTD_outBuffer *") |
@@ -1517,7 +1611,9 b' class ZstdCompressor(object):' | |||
|
1517 | 1611 | self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue |
|
1518 | 1612 | ) |
|
1519 | 1613 | if lib.ZSTD_isError(zresult): |
|
1520 | raise ZstdError("zstd compress error: %s" % _zstd_error(zresult)) | |
|
1614 | raise ZstdError( | |
|
1615 | "zstd compress error: %s" % _zstd_error(zresult) | |
|
1616 | ) | |
|
1521 | 1617 | |
|
1522 | 1618 | if out_buffer.pos: |
|
1523 | 1619 | data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] |
@@ -1596,10 +1692,14 b' def get_frame_parameters(data):' | |||
|
1596 | 1692 | data_buffer = ffi.from_buffer(data) |
|
1597 | 1693 | zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer)) |
|
1598 | 1694 | if lib.ZSTD_isError(zresult): |
|
1599 | raise ZstdError("cannot get frame parameters: %s" % _zstd_error(zresult)) | |
|
1695 | raise ZstdError( | |
|
1696 | "cannot get frame parameters: %s" % _zstd_error(zresult) | |
|
1697 | ) | |
|
1600 | 1698 | |
|
1601 | 1699 | if zresult: |
|
1602 | raise ZstdError("not enough data for frame parameters; need %d bytes" % zresult) | |
|
1700 | raise ZstdError( | |
|
1701 | "not enough data for frame parameters; need %d bytes" % zresult | |
|
1702 | ) | |
|
1603 | 1703 | |
|
1604 | 1704 | return FrameParameters(params[0]) |
|
1605 | 1705 | |
@@ -1611,9 +1711,14 b' class ZstdCompressionDict(object):' | |||
|
1611 | 1711 | self.k = k |
|
1612 | 1712 | self.d = d |
|
1613 | 1713 | |
|
1614 | if dict_type not in (DICT_TYPE_AUTO, DICT_TYPE_RAWCONTENT, DICT_TYPE_FULLDICT): | |
|
1714 | if dict_type not in ( | |
|
1715 | DICT_TYPE_AUTO, | |
|
1716 | DICT_TYPE_RAWCONTENT, | |
|
1717 | DICT_TYPE_FULLDICT, | |
|
1718 | ): | |
|
1615 | 1719 | raise ValueError( |
|
1616 | "invalid dictionary load mode: %d; must use " "DICT_TYPE_* constants" | |
|
1720 | "invalid dictionary load mode: %d; must use " | |
|
1721 | "DICT_TYPE_* constants" | |
|
1617 | 1722 | ) |
|
1618 | 1723 | |
|
1619 | 1724 | self._dict_type = dict_type |
@@ -1630,7 +1735,9 b' class ZstdCompressionDict(object):' | |||
|
1630 | 1735 | |
|
1631 | 1736 | def precompute_compress(self, level=0, compression_params=None): |
|
1632 | 1737 | if level and compression_params: |
|
1633 | raise ValueError("must only specify one of level or " "compression_params") | |
|
1738 | raise ValueError( | |
|
1739 | "must only specify one of level or " "compression_params" | |
|
1740 | ) | |
|
1634 | 1741 | |
|
1635 | 1742 | if not level and not compression_params: |
|
1636 | 1743 | raise ValueError("must specify one of level or compression_params") |
@@ -1675,7 +1782,9 b' class ZstdCompressionDict(object):' | |||
|
1675 | 1782 | if ddict == ffi.NULL: |
|
1676 | 1783 | raise ZstdError("could not create decompression dict") |
|
1677 | 1784 | |
|
1678 | ddict = ffi.gc(ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict)) | |
|
1785 | ddict = ffi.gc( | |
|
1786 | ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict) | |
|
1787 | ) | |
|
1679 | 1788 | self.__dict__["_ddict"] = ddict |
|
1680 | 1789 | |
|
1681 | 1790 | return ddict |
@@ -1805,7 +1914,9 b' class ZstdDecompressionObj(object):' | |||
|
1805 | 1914 | self._decompressor._dctx, out_buffer, in_buffer |
|
1806 | 1915 | ) |
|
1807 | 1916 | if lib.ZSTD_isError(zresult): |
|
1808 | raise ZstdError("zstd decompressor error: %s" % _zstd_error(zresult)) | |
|
1917 | raise ZstdError( | |
|
1918 | "zstd decompressor error: %s" % _zstd_error(zresult) | |
|
1919 | ) | |
|
1809 | 1920 | |
|
1810 | 1921 | if zresult == 0: |
|
1811 | 1922 | self._finished = True |
@@ -2105,16 +2216,22 b' class ZstdDecompressionReader(object):' | |||
|
2105 | 2216 | |
|
2106 | 2217 | if whence == os.SEEK_SET: |
|
2107 | 2218 | if pos < 0: |
|
2108 | raise ValueError("cannot seek to negative position with SEEK_SET") | |
|
2219 | raise ValueError( | |
|
2220 | "cannot seek to negative position with SEEK_SET" | |
|
2221 | ) | |
|
2109 | 2222 | |
|
2110 | 2223 | if pos < self._bytes_decompressed: |
|
2111 | raise ValueError("cannot seek zstd decompression stream " "backwards") | |
|
2224 | raise ValueError( | |
|
2225 | "cannot seek zstd decompression stream " "backwards" | |
|
2226 | ) | |
|
2112 | 2227 | |
|
2113 | 2228 | read_amount = pos - self._bytes_decompressed |
|
2114 | 2229 | |
|
2115 | 2230 | elif whence == os.SEEK_CUR: |
|
2116 | 2231 | if pos < 0: |
|
2117 | raise ValueError("cannot seek zstd decompression stream " "backwards") | |
|
2232 | raise ValueError( | |
|
2233 | "cannot seek zstd decompression stream " "backwards" | |
|
2234 | ) | |
|
2118 | 2235 | |
|
2119 | 2236 | read_amount = pos |
|
2120 | 2237 | elif whence == os.SEEK_END: |
@@ -2123,7 +2240,9 b' class ZstdDecompressionReader(object):' | |||
|
2123 | 2240 | ) |
|
2124 | 2241 | |
|
2125 | 2242 | while read_amount: |
|
2126 | result = self.read(min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)) | |
|
2243 | result = self.read( | |
|
2244 | min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE) | |
|
2245 | ) | |
|
2127 | 2246 | |
|
2128 | 2247 | if not result: |
|
2129 | 2248 | break |
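The seek() path above only moves forward: a zstd stream cannot be accessed randomly, so the reader decompresses and discards until it reaches the target offset. A standalone sketch of the same technique over any file-like object (names are illustrative, not part of the patch):

    import io

    def skip_forward(reader, amount, chunk_size=131072):
        # read and discard until the requested offset (or EOF) is reached
        while amount:
            data = reader.read(min(amount, chunk_size))
            if not data:
                break  # EOF before reaching the target offset
            amount -= len(data)
        return amount  # 0 if the target was reached

    assert skip_forward(io.BytesIO(b'x' * 10), 4) == 0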
@@ -2257,10 +2376,14 b' class ZstdDecompressionWriter(object):' | |||
|
2257 | 2376 | while in_buffer.pos < in_buffer.size: |
|
2258 | 2377 | zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer) |
|
2259 | 2378 | if lib.ZSTD_isError(zresult): |
|
2260 | raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult)) | |
|
2379 | raise ZstdError( | |
|
2380 | "zstd decompress error: %s" % _zstd_error(zresult) | |
|
2381 | ) | |
|
2261 | 2382 | |
|
2262 | 2383 | if out_buffer.pos: |
|
2263 | self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:]) | |
|
2384 | self._writer.write( | |
|
2385 | ffi.buffer(out_buffer.dst, out_buffer.pos)[:] | |
|
2386 | ) | |
|
2264 | 2387 | total_write += out_buffer.pos |
|
2265 | 2388 | out_buffer.pos = 0 |
|
2266 | 2389 | |
@@ -2299,7 +2422,9 b' class ZstdDecompressor(object):' | |||
|
2299 | 2422 | |
|
2300 | 2423 | data_buffer = ffi.from_buffer(data) |
|
2301 | 2424 | |
|
2302 | output_size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer)) | |
|
2425 | output_size = lib.ZSTD_getFrameContentSize( | |
|
2426 | data_buffer, len(data_buffer) | |
|
2427 | ) | |
|
2303 | 2428 | |
|
2304 | 2429 | if output_size == lib.ZSTD_CONTENTSIZE_ERROR: |
|
2305 | 2430 | raise ZstdError("error determining content size from frame header") |
@@ -2307,7 +2432,9 b' class ZstdDecompressor(object):' | |||
|
2307 | 2432 | return b"" |
|
2308 | 2433 | elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN: |
|
2309 | 2434 | if not max_output_size: |
|
2310 | raise ZstdError("could not determine content size in frame header") | |
|
2435 | raise ZstdError( | |
|
2436 | "could not determine content size in frame header" | |
|
2437 | ) | |
|
2311 | 2438 | |
|
2312 | 2439 | result_buffer = ffi.new("char[]", max_output_size) |
|
2313 | 2440 | result_size = max_output_size |
@@ -2330,7 +2457,9 b' class ZstdDecompressor(object):' | |||
|
2330 | 2457 | if lib.ZSTD_isError(zresult): |
|
2331 | 2458 | raise ZstdError("decompression error: %s" % _zstd_error(zresult)) |
|
2332 | 2459 | elif zresult: |
|
2333 | raise ZstdError("decompression error: did not decompress full frame") | |
|
2460 | raise ZstdError( | |
|
2461 | "decompression error: did not decompress full frame" | |
|
2462 | ) | |
|
2334 | 2463 | elif output_size and out_buffer.pos != output_size: |
|
2335 | 2464 | raise ZstdError( |
|
2336 | 2465 | "decompression error: decompressed %d bytes; expected %d" |
@@ -2346,7 +2475,9 b' class ZstdDecompressor(object):' | |||
|
2346 | 2475 | read_across_frames=False, |
|
2347 | 2476 | ): |
|
2348 | 2477 | self._ensure_dctx() |
|
2349 | return ZstdDecompressionReader(self, source, read_size, read_across_frames) | |
|
2478 | return ZstdDecompressionReader( | |
|
2479 | self, source, read_size, read_across_frames | |
|
2480 | ) | |
|
2350 | 2481 | |
|
2351 | 2482 | def decompressobj(self, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE): |
|
2352 | 2483 | if write_size < 1: |
@@ -2421,9 +2552,13 b' class ZstdDecompressor(object):' | |||
|
2421 | 2552 | while in_buffer.pos < in_buffer.size: |
|
2422 | 2553 | assert out_buffer.pos == 0 |
|
2423 | 2554 | |
|
2424 | zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer) | |
|
2555 | zresult = lib.ZSTD_decompressStream( | |
|
2556 | self._dctx, out_buffer, in_buffer | |
|
2557 | ) | |
|
2425 | 2558 | if lib.ZSTD_isError(zresult): |
|
2426 | raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult)) | |
|
2559 | raise ZstdError( | |
|
2560 | "zstd decompress error: %s" % _zstd_error(zresult) | |
|
2561 | ) | |
|
2427 | 2562 | |
|
2428 | 2563 | if out_buffer.pos: |
|
2429 | 2564 | data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] |
@@ -2449,7 +2584,9 b' class ZstdDecompressor(object):' | |||
|
2449 | 2584 | if not hasattr(writer, "write"): |
|
2450 | 2585 | raise ValueError("must pass an object with a write() method") |
|
2451 | 2586 | |
|
2452 | return ZstdDecompressionWriter(self, writer, write_size, write_return_read) | |
|
2587 | return ZstdDecompressionWriter( | |
|
2588 | self, writer, write_size, write_return_read | |
|
2589 | ) | |
|
2453 | 2590 | |
|
2454 | 2591 | write_to = stream_writer |
|
2455 | 2592 | |
@@ -2491,7 +2628,9 b' class ZstdDecompressor(object):' | |||
|
2491 | 2628 | |
|
2492 | 2629 | # Flush all read data to output. |
|
2493 | 2630 | while in_buffer.pos < in_buffer.size: |
|
2494 | zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer) | |
|
2631 | zresult = lib.ZSTD_decompressStream( | |
|
2632 | self._dctx, out_buffer, in_buffer | |
|
2633 | ) | |
|
2495 | 2634 | if lib.ZSTD_isError(zresult): |
|
2496 | 2635 | raise ZstdError( |
|
2497 | 2636 | "zstd decompressor error: %s" % _zstd_error(zresult) |
@@ -2521,7 +2660,9 b' class ZstdDecompressor(object):' | |||
|
2521 | 2660 | # All chunks should be zstd frames and should have content size set. |
|
2522 | 2661 | chunk_buffer = ffi.from_buffer(chunk) |
|
2523 | 2662 | params = ffi.new("ZSTD_frameHeader *") |
|
2524 | zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer)) | |
|
2663 | zresult = lib.ZSTD_getFrameHeader( | |
|
2664 | params, chunk_buffer, len(chunk_buffer) | |
|
2665 | ) | |
|
2525 | 2666 | if lib.ZSTD_isError(zresult): |
|
2526 | 2667 | raise ValueError("chunk 0 is not a valid zstd frame") |
|
2527 | 2668 | elif zresult: |
@@ -2546,7 +2687,9 b' class ZstdDecompressor(object):' | |||
|
2546 | 2687 | |
|
2547 | 2688 | zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer) |
|
2548 | 2689 | if lib.ZSTD_isError(zresult): |
|
2549 | raise ZstdError("could not decompress chunk 0: %s" % _zstd_error(zresult)) | |
|
2690 | raise ZstdError( | |
|
2691 | "could not decompress chunk 0: %s" % _zstd_error(zresult) | |
|
2692 | ) | |
|
2550 | 2693 | elif zresult: |
|
2551 | 2694 | raise ZstdError("chunk 0 did not decompress full frame") |
|
2552 | 2695 | |
@@ -2561,11 +2704,15 b' class ZstdDecompressor(object):' | |||
|
2561 | 2704 | raise ValueError("chunk %d must be bytes" % i) |
|
2562 | 2705 | |
|
2563 | 2706 | chunk_buffer = ffi.from_buffer(chunk) |
|
2564 | zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer)) | |
|
2707 | zresult = lib.ZSTD_getFrameHeader( | |
|
2708 | params, chunk_buffer, len(chunk_buffer) | |
|
2709 | ) | |
|
2565 | 2710 | if lib.ZSTD_isError(zresult): |
|
2566 | 2711 | raise ValueError("chunk %d is not a valid zstd frame" % i) |
|
2567 | 2712 | elif zresult: |
|
2568 | raise ValueError("chunk %d is too small to contain a zstd frame" % i) | |
|
2713 | raise ValueError( | |
|
2714 | "chunk %d is too small to contain a zstd frame" % i | |
|
2715 | ) | |
|
2569 | 2716 | |
|
2570 | 2717 | if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN: |
|
2571 | 2718 | raise ValueError("chunk %d missing content size in frame" % i) |
@@ -2580,7 +2727,9 b' class ZstdDecompressor(object):' | |||
|
2580 | 2727 | in_buffer.size = len(chunk_buffer) |
|
2581 | 2728 | in_buffer.pos = 0 |
|
2582 | 2729 | |
|
2583 | zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer) | |
|
2730 | zresult = lib.ZSTD_decompressStream( | |
|
2731 | self._dctx, out_buffer, in_buffer | |
|
2732 | ) | |
|
2584 | 2733 | if lib.ZSTD_isError(zresult): |
|
2585 | 2734 | raise ZstdError( |
|
2586 | 2735 | "could not decompress chunk %d: %s" % _zstd_error(zresult) |
@@ -2597,7 +2746,9 b' class ZstdDecompressor(object):' | |||
|
2597 | 2746 | lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only) |
|
2598 | 2747 | |
|
2599 | 2748 | if self._max_window_size: |
|
2600 | zresult = lib.ZSTD_DCtx_setMaxWindowSize(self._dctx, self._max_window_size) | |
|
2749 | zresult = lib.ZSTD_DCtx_setMaxWindowSize( | |
|
2750 | self._dctx, self._max_window_size | |
|
2751 | ) | |
|
2601 | 2752 | if lib.ZSTD_isError(zresult): |
|
2602 | 2753 | raise ZstdError( |
|
2603 | 2754 | "unable to set max window size: %s" % _zstd_error(zresult) |
@@ -2605,11 +2756,14 b' class ZstdDecompressor(object):' | |||
|
2605 | 2756 | |
|
2606 | 2757 | zresult = lib.ZSTD_DCtx_setFormat(self._dctx, self._format) |
|
2607 | 2758 | if lib.ZSTD_isError(zresult): |
|
2608 | raise ZstdError("unable to set decoding format: %s" % _zstd_error(zresult)) | |
|
2759 | raise ZstdError( | |
|
2760 | "unable to set decoding format: %s" % _zstd_error(zresult) | |
|
2761 | ) | |
|
2609 | 2762 | |
|
2610 | 2763 | if self._dict_data and load_dict: |
|
2611 | 2764 | zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict) |
|
2612 | 2765 | if lib.ZSTD_isError(zresult): |
|
2613 | 2766 | raise ZstdError( |
|
2614 | "unable to reference prepared dictionary: %s" % _zstd_error(zresult) | |
|
2767 | "unable to reference prepared dictionary: %s" | |
|
2768 | % _zstd_error(zresult) | |
|
2615 | 2769 | ) |
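Every hunk in this file applies the same error-handling idiom that the reformatting wraps: zstd returns a size_t whose error status must be checked with ZSTD_isError() before the value is used. A minimal sketch of that pattern, assuming the cffi bindings are in scope as `ffi` and `lib` and a ZstdError class exists (the helper name is hypothetical):

    def _check_zstd(zresult, what):
        # ZSTD_* calls encode failures in their return value; decode the
        # error name via the C API instead of guessing.
        if lib.ZSTD_isError(zresult):
            name = ffi.string(lib.ZSTD_getErrorName(zresult)).decode('utf-8')
            raise ZstdError('%s: %s' % (what, name))
        return zresult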
@@ -5,7 +5,7 b' GENDOC=gendoc.py ../mercurial/commands.p' | |||
|
5 | 5 | ../mercurial/helptext/*.txt ../hgext/*.py ../hgext/*/__init__.py |
|
6 | 6 | PREFIX=/usr/local |
|
7 | 7 | MANDIR=$(PREFIX)/share/man |
|
8 | INSTALL=install | |
|
8 | INSTALL=install -m 644 | |
|
9 | 9 | PYTHON?=python |
|
10 | 10 | RSTARGS= |
|
11 | 11 |
@@ -407,7 +407,7 b' class filefixupstate(object):' | |||
|
407 | 407 | involved = [ |
|
408 | 408 | annotated[i] for i in nearbylinenums if annotated[i][0] != 1 |
|
409 | 409 | ] |
|
410 | involvedrevs = list(set(r for r, l in involved)) | |
|
410 | involvedrevs = list({r for r, l in involved}) | |
|
411 | 411 | newfixups = [] |
|
412 | 412 | if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True): |
|
413 | 413 | # chunk belongs to a single revision |
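This and several hunks below apply the same cleanup: replace set(...)/dict(...) over a generator with a set or dict literal. Both forms are equivalent; the literal skips a name lookup and a function call. A quick check with made-up data:

    involved = [(2, 'a'), (3, 'b'), (2, 'c')]
    old_style = list(set(r for r, l in involved))
    new_style = list({r for r, l in involved})
    assert sorted(old_style) == sorted(new_style) == [2, 3]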
@@ -734,10 +734,10 b' class fixupstate(object):' | |||
|
734 | 734 | @property |
|
735 | 735 | def chunkstats(self): |
|
736 | 736 | """-> {path: chunkstats}. collect chunkstats from filefixupstates""" |
|
737 | return dict( | |
|
738 | (path, state.chunkstats) | |
|
737 | return { | |
|
738 | path: state.chunkstats | |
|
739 | 739 | for path, state in pycompat.iteritems(self.fixupmap) |
|
740 | ) | |
|
740 | } | |
|
741 | 741 | |
|
742 | 742 | def commit(self): |
|
743 | 743 | """commit changes. update self.finalnode, self.replacemap""" |
@@ -1077,7 +1077,7 b' def absorb(ui, repo, stack=None, targetc' | |||
|
1077 | 1077 | b'i', |
|
1078 | 1078 | b'interactive', |
|
1079 | 1079 | None, |
|
1080 | _(b'interactively select which chunks to apply (EXPERIMENTAL)'), | |
|
1080 | _(b'interactively select which chunks to apply'), | |
|
1081 | 1081 | ), |
|
1082 | 1082 | ( |
|
1083 | 1083 | b'e', |
@@ -71,6 +71,8 b' def getprettygraphnode(orig, *args, **kw' | |||
|
71 | 71 | return b'\xE2\x97\x8B' # U+25CB ○ |
|
72 | 72 | if node == b'@': |
|
73 | 73 | return b'\xE2\x97\x8D' # U+25CD ◍ |
|
74 | if node == b'%': | |
|
75 | return b'\xE2\x97\x8E' # U+25CE ◎ | |
|
74 | 76 | if node == b'*': |
|
75 | 77 | return b'\xE2\x88\x97' # U+2217 ∗ |
|
76 | 78 | if node == b'x': |
@@ -76,7 +76,7 b' def close_branch(ui, repo, *revs, **opts' | |||
|
76 | 76 | heads = [] |
|
77 | 77 | for branch in repo.branchmap(): |
|
78 | 78 | heads.extend(repo.branchheads(branch)) |
|
79 | heads = set(repo[h].rev() for h in heads) | |
|
79 | heads = {repo[h].rev() for h in heads} | |
|
80 | 80 | for rev in revs: |
|
81 | 81 | if rev not in heads: |
|
82 | 82 | raise error.Abort(_(b'revision is not an open head: %d') % rev) |
@@ -677,13 +677,9 b' class mercurial_source(common.converter_' | |||
|
677 | 677 | for t in self.repo.tagslist() |
|
678 | 678 | if self.repo.tagtype(t[0]) == b'global' |
|
679 | 679 | ] |
|
680 | return dict( | |
|
681 | [ | |
|
682 | (name, nodemod.hex(node)) | |
|
683 | for name, node in tags | |
|
684 | if self.keep(node) | |
|
685 | ] | |
|
686 | ) | |
|
680 | return { | |
|
681 | name: nodemod.hex(node) for name, node in tags if self.keep(node) | |
|
682 | } | |
|
687 | 683 | |
|
688 | 684 | def getchangedfiles(self, rev, i): |
|
689 | 685 | ctx = self._changectx(rev) |
@@ -710,11 +710,11 b' class svn_source(converter_source):' | |||
|
710 | 710 | # Here/tags/tag.1 discarded as well as its children. |
|
711 | 711 | # It happens with tools like cvs2svn. Such tags cannot |
|
712 | 712 | # be represented in mercurial. |
|
713 | addeds = dict( | |
|
714 | (p, e.copyfrom_path) | |
|
713 | addeds = { | |
|
714 | p: e.copyfrom_path | |
|
715 | 715 | for p, e in pycompat.iteritems(origpaths) |
|
716 | 716 | if e.action == b'A' and e.copyfrom_path |
|
717 | ) | |
|
717 | } | |
|
718 | 718 | badroots = set() |
|
719 | 719 | for destroot in addeds: |
|
720 | 720 | for source, sourcerev, dest in pendings: |
@@ -221,7 +221,7 b' class eolfile(object):' | |||
|
221 | 221 | self.match = match.match(root, b'', [], include, exclude) |
|
222 | 222 | |
|
223 | 223 | def copytoui(self, ui): |
|
224 | newpatterns = set(pattern for pattern, key, m in self.patterns) | |
|
224 | newpatterns = {pattern for pattern, key, m in self.patterns} | |
|
225 | 225 | for section in (b'decode', b'encode'): |
|
226 | 226 | for oldpattern, _filter in ui.configitems(section): |
|
227 | 227 | if oldpattern not in newpatterns: |
@@ -233,7 +233,7 b' def fastannotate(ui, repo, *pats, **opts' | |||
|
233 | 233 | showlines=(showlines and not showdeleted), |
|
234 | 234 | ) |
|
235 | 235 | if showdeleted: |
|
236 | existinglines = set((l[0], l[1]) for l in result) | |
|
236 | existinglines = {(l[0], l[1]) for l in result} | |
|
237 | 237 | result = a.annotatealllines( |
|
238 | 238 | rev, showpath=showpath, showlines=showlines |
|
239 | 239 | ) |
@@ -171,11 +171,11 b" def fetch(ui, repo, source=b'default', *" | |||
|
171 | 171 | % (repo.changelog.rev(firstparent), short(firstparent)) |
|
172 | 172 | ) |
|
173 | 173 | hg.clean(repo, firstparent) |
|
174 | p2ctx = repo[secondparent] | |
|
174 | 175 | ui.status( |
|
175 | _(b'merging with %d:%s\n') | |
|
176 | % (repo.changelog.rev(secondparent), short(secondparent)) | |
|
176 | _(b'merging with %d:%s\n') % (p2ctx.rev(), short(secondparent)) | |
|
177 | 177 | ) |
|
178 | err = hg.merge(repo, secondparent, remind=False) | |
|
178 | err = hg.merge(p2ctx, remind=False) | |
|
179 | 179 | |
|
180 | 180 | if not err: |
|
181 | 181 | # we don't translate commit messages |
@@ -213,7 +213,14 b' baseopt = (' | |||
|
213 | 213 | ), |
|
214 | 214 | _(b'REV'), |
|
215 | 215 | ) |
|
216 | revopt = (b'r', b'rev', [], _(b'revisions to fix'), _(b'REV')) | |
|
216 | revopt = (b'r', b'rev', [], _(b'revisions to fix (ADVANCED)'), _(b'REV')) | |
|
217 | sourceopt = ( | |
|
218 | b's', | |
|
219 | b'source', | |
|
220 | [], | |
|
221 | _(b'fix the specified revisions and their descendants'), | |
|
222 | _(b'REV'), | |
|
223 | ) | |
|
217 | 224 | wdiropt = (b'w', b'working-dir', False, _(b'fix the working directory')) |
|
218 | 225 | wholeopt = (b'', b'whole', False, _(b'always fix every line of a file')) |
|
219 | 226 | usage = _(b'[OPTION]... [FILE]...') |
@@ -221,7 +228,7 b" usage = _(b'[OPTION]... [FILE]...')" | |||
|
221 | 228 | |
|
222 | 229 | @command( |
|
223 | 230 | b'fix', |
|
224 | [allopt, baseopt, revopt, wdiropt, wholeopt], | |
|
231 | [allopt, baseopt, revopt, sourceopt, wdiropt, wholeopt], | |
|
225 | 232 | usage, |
|
226 | 233 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
227 | 234 | ) |
@@ -249,10 +256,11 b' def fix(ui, repo, *pats, **opts):' | |||
|
249 | 256 | override this default behavior, though it is not usually desirable to do so. |
|
250 | 257 | """ |
|
251 | 258 | opts = pycompat.byteskwargs(opts) |
|
252 | cmdutil.check_at_most_one_arg(opts, b'all', b'rev') | |
|
253 | if opts[b'all']: | |
|
254 | opts[b'rev'] = [b'not public() and not obsolete()'] | |
|
255 | opts[b'working_dir'] = True | |
|
259 | cmdutil.check_at_most_one_arg(opts, b'all', b'source', b'rev') | |
|
260 | cmdutil.check_incompatible_arguments( | |
|
261 | opts, b'working_dir', [b'all', b'source'] | |
|
262 | ) | |
|
263 | ||
|
256 | 264 | with repo.wlock(), repo.lock(), repo.transaction(b'fix'): |
|
257 | 265 | revstofix = getrevstofix(ui, repo, opts) |
|
258 | 266 | basectxs = getbasectxs(repo, opts, revstofix) |
@@ -398,15 +406,27 b' def getworkqueue(ui, repo, pats, opts, r' | |||
|
398 | 406 | |
|
399 | 407 | def getrevstofix(ui, repo, opts): |
|
400 | 408 | """Returns the set of revision numbers that should be fixed""" |
|
409 | if opts[b'all']: | |
|
410 | revs = repo.revs(b'(not public() and not obsolete()) or wdir()') | |
|
411 | elif opts[b'source']: | |
|
412 | source_revs = scmutil.revrange(repo, opts[b'source']) | |
|
413 | revs = set(repo.revs(b'%ld::', source_revs)) | |
|
414 | if wdirrev in source_revs: | |
|
415 | # `wdir()::` is currently empty, so manually add wdir | |
|
416 | revs.add(wdirrev) | |
|
417 | if repo[b'.'].rev() in revs: | |
|
418 | revs.add(wdirrev) | |
|
419 | else: | |
|
401 | 420 | revs = set(scmutil.revrange(repo, opts[b'rev'])) |
|
421 | if opts.get(b'working_dir'): | |
|
422 | revs.add(wdirrev) | |
|
402 | 423 | for rev in revs: |
|
403 | 424 | checkfixablectx(ui, repo, repo[rev]) |
|
404 | if revs: | |
|
425 | # Allow fixing only wdir() even if there's an unfinished operation | |
|
426 | if not (len(revs) == 1 and wdirrev in revs): | |
|
405 | 427 | cmdutil.checkunfinished(repo) |
|
406 | 428 | rewriteutil.precheck(repo, revs, b'fix') |
|
407 | if opts.get(b'working_dir'): | |
|
408 | revs.add(wdirrev) | |
|
409 | if list(merge.mergestate.read(repo).unresolved()): | |
|
429 | if wdirrev in revs and list(merge.mergestate.read(repo).unresolved()): | |
|
410 | 430 | raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'") |
|
411 | 431 | if not revs: |
|
412 | 432 | raise error.Abort( |
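The `--source` branch above expands the given revisions to themselves plus all of their descendants (the `%ld::` revset). A toy model of that expansion over a made-up child map, for intuition only:

    children = {1: [2, 3], 2: [4], 3: [], 4: []}

    def descendants(roots):
        seen = set(roots)
        stack = list(roots)
        while stack:
            for child in children[stack.pop()]:
                if child not in seen:
                    seen.add(child)
                    stack.append(child)
        return seen

    assert descendants({2}) == {2, 4}
    assert descendants({1}) == {1, 2, 3, 4}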
@@ -735,15 +755,7 b' def replacerev(ui, repo, ctx, filedata, ' | |||
|
735 | 755 | |
|
736 | 756 | wctx = context.overlayworkingctx(repo) |
|
737 | 757 | wctx.setbase(repo[newp1node]) |
|
738 | merge.update( | |
|
739 | repo, | |
|
740 | ctx.rev(), | |
|
741 | branchmerge=False, | |
|
742 | force=True, | |
|
743 | ancestor=p1rev, | |
|
744 | mergeancestor=False, | |
|
745 | wc=wctx, | |
|
746 | ) | |
|
758 | merge.revert_to(ctx, wc=wctx) | |
|
747 | 759 | copies.graftcopies(wctx, ctx, ctx.p1()) |
|
748 | 760 | |
|
749 | 761 | for path in filedata.keys(): |
@@ -397,7 +397,7 b' def overridewalk(orig, self, match, subr' | |||
|
397 | 397 | # for file paths which require normalization and we encounter a case |
|
398 | 398 | # collision, we store our own foldmap |
|
399 | 399 | if normalize: |
|
400 | foldmap = dict((normcase(k), k) for k in results) | |
|
400 | foldmap = {normcase(k): k for k in results} | |
|
401 | 401 | |
|
402 | 402 | switch_slashes = pycompat.ossep == b'\\' |
|
403 | 403 | # The order of the results is, strictly speaking, undefined. |
@@ -459,22 +459,16 b' def overridewalk(orig, self, match, subr' | |||
|
459 | 459 | if normalize: |
|
460 | 460 | # any notable files that have changed case will already be handled |
|
461 | 461 | # above, so just check membership in the foldmap |
|
462 | notefiles = set( | |
|
463 | ( | |
|
462 | notefiles = { | |
|
464 | 463 | normcase(f) |
|
465 | 464 | for f in notefiles |
|
466 | 465 | if normcase(f) in foldmap |
|
467 | ) | |
|
468 | ) | |
|
469 | visit = set( | |
|
470 | ( | |
|
466 | } | |
|
467 | visit = { | |
|
471 | 468 | f |
|
472 | 469 | for f in notefiles |
|
473 | if ( | |
|
474 | f not in results and matchfn(f) and (f in dmap or not ignore(f)) | |
|
475 | ) | |
|
476 | ) | |
|
477 | ) | |
|
470 | if (f not in results and matchfn(f) and (f in dmap or not ignore(f))) | |
|
471 | } | |
|
478 | 472 | |
|
479 | 473 | if not fresh_instance: |
|
480 | 474 | if matchalways: |
@@ -358,7 +358,7 b' def revtree(ui, args, repo, full=b"tree"' | |||
|
358 | 358 | ) |
|
359 | 359 | def revlist(ui, repo, *revs, **opts): |
|
360 | 360 | """print revisions""" |
|
361 | if opts[r'header']: | |
|
361 | if opts['header']: | |
|
362 | 362 | full = b"commit" |
|
363 | 363 | else: |
|
364 | 364 | full = None |
@@ -649,7 +649,7 b' def applychanges(ui, repo, ctx, opts):' | |||
|
649 | 649 | repo.ui.setconfig( |
|
650 | 650 | b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit' |
|
651 | 651 | ) |
|
652 | stats = mergemod.graft(repo, ctx, ctx.p1(), [b'local', b'histedit']) | |
|
652 | stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit']) | |
|
653 | 653 | finally: |
|
654 | 654 | repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit') |
|
655 | 655 | return stats |
@@ -835,10 +835,10 b' class fold(histeditaction):' | |||
|
835 | 835 | return ctx, [(self.node, (parentctxnode,))] |
|
836 | 836 | |
|
837 | 837 | parentctx = repo[parentctxnode] |
|
838 | newcommits = set( | |
|
838 | newcommits = { | |
|
839 | 839 | c.node() |
|
840 | 840 | for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev()) |
|
841 | ) | |
|
841 | } | |
|
842 | 842 | if not newcommits: |
|
843 | 843 | repo.ui.warn( |
|
844 | 844 | _( |
@@ -945,7 +945,7 b' class fold(histeditaction):' | |||
|
945 | 945 | class base(histeditaction): |
|
946 | 946 | def run(self): |
|
947 | 947 | if self.repo[b'.'].node() != self.node: |
|
948 | mergemod.update(self.repo, self.node, branchmerge=False, force=True) | |
|
948 | mergemod.clean_update(self.repo[self.node]) | |
|
949 | 949 | return self.continueclean() |
|
950 | 950 | |
|
951 | 951 | def continuedirty(self): |
@@ -1113,7 +1113,8 b' def screen_size():' | |||
|
1113 | 1113 | |
|
1114 | 1114 | |
|
1115 | 1115 | class histeditrule(object): |
|
1116 | def __init__(self, ctx, pos, action=b'pick'): | |
|
1116 | def __init__(self, ui, ctx, pos, action=b'pick'): | |
|
1117 | self.ui = ui | |
|
1117 | 1118 | self.ctx = ctx |
|
1118 | 1119 | self.action = action |
|
1119 | 1120 | self.origpos = pos |
@@ -1153,6 +1154,14 b' class histeditrule(object):' | |||
|
1153 | 1154 | |
|
1154 | 1155 | @property |
|
1155 | 1156 | def desc(self): |
|
1157 | summary = ( | |
|
1158 | cmdutil.rendertemplate( | |
|
1159 | self.ctx, self.ui.config(b'histedit', b'summary-template') | |
|
1160 | ) | |
|
1161 | or b'' | |
|
1162 | ) | |
|
1163 | if summary: | |
|
1164 | return summary | |
|
1156 | 1165 | # This is split off from the prefix property so that we can |
|
1157 | 1166 | # separately make the description for 'roll' red (since it |
|
1158 | 1167 | # will get discarded). |
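The property above renders the configured histedit.summary-template for each rule and falls back to the plain description when the template produces nothing. The `or b''` plus `if summary:` dance reduces to a small pattern (stand-in render function, not the extension's code):

    def desc(render_template, default_desc):
        summary = render_template() or ''
        return summary or default_desc

    assert desc(lambda: '', 'pick 1234 abc') == 'pick 1234 abc'
    assert desc(lambda: '42 fix docs', 'pick 1234 abc') == '42 fix docs'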
@@ -1258,7 +1267,7 b' def changeview(state, delta, unit):' | |||
|
1258 | 1267 | num_lines = len(mode_state[b'patchcontents']) |
|
1259 | 1268 | page_height = state[b'page_height'] |
|
1260 | 1269 | unit = page_height if unit == b'page' else 1 |
|
1261 | num_pages = 1 + (num_lines - 1) / page_height | |
|
1270 | num_pages = 1 + (num_lines - 1) // page_height | |
|
1262 | 1271 | max_offset = (num_pages - 1) * page_height |
|
1263 | 1272 | newline = mode_state[b'line_offset'] + delta * unit |
|
1264 | 1273 | mode_state[b'line_offset'] = max(0, min(max_offset, newline)) |
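The `/` to `//` change above matters on Python 3, where `/` always returns a float. The expression is ceiling division in disguise; a quick sanity check of the identity (standalone, not part of the patch):

    def num_pages(num_lines, page_height):
        # 1 + (n - 1) // k == ceil(n / k) for n >= 1
        return 1 + (num_lines - 1) // page_height

    assert num_pages(1, 10) == 1
    assert num_pages(10, 10) == 1
    assert num_pages(11, 10) == 2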
@@ -1700,7 +1709,7 b' def _chistedit(ui, repo, freeargs, opts)' | |||
|
1700 | 1709 | |
|
1701 | 1710 | ctxs = [] |
|
1702 | 1711 | for i, r in enumerate(revs): |
|
1703 | ctxs.append(histeditrule(repo[r], i)) | |
|
1712 | ctxs.append(histeditrule(ui, repo[r], i)) | |
|
1704 | 1713 | # Curses requires setting the locale or it will default to the C |
|
1705 | 1714 | # locale. This sets the locale to the user's default system |
|
1706 | 1715 | # locale. |
@@ -2412,7 +2421,7 b' def verifyactions(actions, state, ctxs):' | |||
|
2412 | 2421 | Will abort if there are to many or too few rules, a malformed rule, |
|
2413 | 2422 | or a rule on a changeset outside of the user-given range. |
|
2414 | 2423 | """ |
|
2415 | expected = set(c.node() for c in ctxs) | |
|
2424 | expected = {c.node() for c in ctxs} | |
|
2416 | 2425 | seen = set() |
|
2417 | 2426 | prev = None |
|
2418 | 2427 |
@@ -67,7 +67,7 b' class basestore(object):' | |||
|
67 | 67 | ui = self.ui |
|
68 | 68 | |
|
69 | 69 | at = 0 |
|
70 | available = self.exists(set(hash for (_filename, hash) in files)) | |
|
70 | available = self.exists({hash for (_filename, hash) in files}) | |
|
71 | 71 | with ui.makeprogress( |
|
72 | 72 | _(b'getting largefiles'), unit=_(b'files'), total=len(files) |
|
73 | 73 | ) as progress: |
@@ -92,16 +92,30 b' def _usercachedir(ui, name=longname):' | |||
|
92 | 92 | path = ui.configpath(name, b'usercache') |
|
93 | 93 | if path: |
|
94 | 94 | return path |
|
95 | ||
|
96 | hint = None | |
|
97 | ||
|
95 | 98 | if pycompat.iswindows: |
|
96 | 99 | appdata = encoding.environ.get( |
|
97 | 100 | b'LOCALAPPDATA', encoding.environ.get(b'APPDATA') |
|
98 | 101 | ) |
|
99 | 102 | if appdata: |
|
100 | 103 | return os.path.join(appdata, name) |
|
104 | ||
|
105 | hint = _(b"define %s or %s in the environment, or set %s.usercache") % ( | |
|
106 | b"LOCALAPPDATA", | |
|
107 | b"APPDATA", | |
|
108 | name, | |
|
109 | ) | |
|
101 | 110 | elif pycompat.isdarwin: |
|
102 | 111 | home = encoding.environ.get(b'HOME') |
|
103 | 112 | if home: |
|
104 | 113 | return os.path.join(home, b'Library', b'Caches', name) |
|
114 | ||
|
115 | hint = _(b"define %s in the environment, or set %s.usercache") % ( | |
|
116 | b"HOME", | |
|
117 | name, | |
|
118 | ) | |
|
105 | 119 | elif pycompat.isposix: |
|
106 | 120 | path = encoding.environ.get(b'XDG_CACHE_HOME') |
|
107 | 121 | if path: |
@@ -109,11 +123,18 b' def _usercachedir(ui, name=longname):' | |||
|
109 | 123 | home = encoding.environ.get(b'HOME') |
|
110 | 124 | if home: |
|
111 | 125 | return os.path.join(home, b'.cache', name) |
|
126 | ||
|
127 | hint = _(b"define %s or %s in the environment, or set %s.usercache") % ( | |
|
128 | b"XDG_CACHE_HOME", | |
|
129 | b"HOME", | |
|
130 | name, | |
|
131 | ) | |
|
112 | 132 | else: |
|
113 | 133 | raise error.Abort( |
|
114 | 134 | _(b'unknown operating system: %s\n') % pycompat.osname |
|
115 | 135 | ) |
|
116 | raise error.Abort(_(b'unknown %s usercache location') % name) | |
|
136 | ||
|
137 | raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint) | |
|
117 | 138 | |
|
118 | 139 | |
|
119 | 140 | def inusercache(ui, hash): |
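The hunk above threads a platform-specific hint into the abort message. Its lookup order, condensed into a standalone sketch (error type and wording here are illustrative, not the extension's):

    import os
    import sys

    def usercachedir(name):
        if sys.platform == 'win32':
            base = os.environ.get('LOCALAPPDATA', os.environ.get('APPDATA'))
            if base:
                return os.path.join(base, name)
        elif sys.platform == 'darwin':
            home = os.environ.get('HOME')
            if home:
                return os.path.join(home, 'Library', 'Caches', name)
        else:
            base = os.environ.get('XDG_CACHE_HOME')
            home = os.environ.get('HOME')
            if base:
                return os.path.join(base, name)
            if home:
                return os.path.join(home, '.cache', name)
        raise RuntimeError('unknown %s usercache location' % name)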
@@ -1564,11 +1564,11 b' def overridepurge(orig, ui, repo, *dirs,' | |||
|
1564 | 1564 | def overriderollback(orig, ui, repo, **opts): |
|
1565 | 1565 | with repo.wlock(): |
|
1566 | 1566 | before = repo.dirstate.parents() |
|
1567 | orphans = set( | |
|
1567 | orphans = { | |
|
1568 | 1568 | f |
|
1569 | 1569 | for f in repo.dirstate |
|
1570 | 1570 | if lfutil.isstandin(f) and repo.dirstate[f] != b'r' |
|
1571 | ) | |
|
1571 | } | |
|
1572 | 1572 | result = orig(ui, repo, **opts) |
|
1573 | 1573 | after = repo.dirstate.parents() |
|
1574 | 1574 | if before == after: |
@@ -48,12 +48,12 b' class remotestore(basestore.basestore):' | |||
|
48 | 48 | ) |
|
49 | 49 | |
|
50 | 50 | def exists(self, hashes): |
|
51 | return dict( | |
|
52 | (h, s == 0) | |
|
51 | return { | |
|
52 | h: s == 0 | |
|
53 | 53 | for (h, s) in pycompat.iteritems( |
|
54 | 54 | self._stat(hashes) |
|
55 | 55 | ) # dict-from-generator |
|
56 | ) | |
|
56 | } | |
|
57 | 57 | |
|
58 | 58 | def sendfile(self, filename, hash): |
|
59 | 59 | self.ui.debug(b'remotestore: sendfile(%s, %s)\n' % (filename, hash)) |
@@ -38,9 +38,6 b' ruled out) prior to taking off the exper' | |||
|
38 | 38 | |
|
39 | 39 | * `hg diff` is similar, and probably shouldn't see the pointer file |
|
40 | 40 | |
|
41 | #. `Fix https multiplexing, and re-enable workers | |
|
42 | <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-January/109916.html>`_. | |
|
43 | ||
|
44 | 41 | #. Show to-be-applied rules with `hg files -r 'wdir()' 'set:lfs()'` |
|
45 | 42 | |
|
46 | 43 | * `debugignore` can show file + line number, so a dedicated command could be |
@@ -181,7 +181,7 b' eh.configitem(' | |||
|
181 | 181 | b'experimental', b'lfs.disableusercache', default=False, |
|
182 | 182 | ) |
|
183 | 183 | eh.configitem( |
|
184 | b'experimental', b'lfs.worker-enable', default=False, | |
|
184 | b'experimental', b'lfs.worker-enable', default=True, | |
|
185 | 185 | ) |
|
186 | 186 | |
|
187 | 187 | eh.configitem( |
@@ -21,6 +21,7 b' from mercurial.pycompat import getattr' | |||
|
21 | 21 | from mercurial import ( |
|
22 | 22 | encoding, |
|
23 | 23 | error, |
|
24 | httpconnection as httpconnectionmod, | |
|
24 | 25 | node, |
|
25 | 26 | pathutil, |
|
26 | 27 | pycompat, |
@@ -94,33 +95,16 b' class nullvfs(lfsvfs):' | |||
|
94 | 95 | pass |
|
95 | 96 | |
|
96 | 97 | |
|
97 | class filewithprogress(object): | |
|
98 | """a file-like object that supports __len__ and read. | |
|
99 | ||
|
100 | Useful to provide progress information for how many bytes are read. | |
|
98 | class lfsuploadfile(httpconnectionmod.httpsendfile): | |
|
99 | """a file-like object that supports keepalive. | |
|
101 | 100 | """ |
|
102 | 101 | |
|
103 | def __init__(self, fp, callback): | |
|
104 | self._fp = fp | |
|
105 | self._callback = callback # func(readsize) | |
|
106 | fp.seek(0, os.SEEK_END) | |
|
107 | self._len = fp.tell() | |
|
108 | fp.seek(0) | |
|
109 | ||
|
110 | def __len__(self): | |
|
111 | return self._len | |
|
102 | def __init__(self, ui, filename): | |
|
103 | super(lfsuploadfile, self).__init__(ui, filename, b'rb') | |
|
104 | self.read = self._data.read | |
|
112 | 105 | |
|
113 | def read(self, size): | |
|
114 | if self._fp is None: | |
|
115 | return b'' | |
|
116 | data = self._fp.read(size) | |
|
117 | if data: | |
|
118 | if self._callback: | |
|
119 | self._callback(len(data)) | |
|
120 | else: | |
|
121 | self._fp.close() | |
|
122 | self._fp = None | |
|
123 | return data | |
|
106 | def _makeprogress(self): | |
|
107 | return None # progress is handled by the worker client | |
|
124 | 108 | |
|
125 | 109 | |
|
126 | 110 | class local(object): |
@@ -144,6 +128,17 b' class local(object):' | |||
|
144 | 128 | def open(self, oid): |
|
145 | 129 | """Open a read-only file descriptor to the named blob, in either the |
|
146 | 130 | usercache or the local store.""" |
|
131 | return open(self.path(oid), 'rb') | |
|
132 | ||
|
133 | def path(self, oid): | |
|
134 | """Build the path for the given blob ``oid``. | |
|
135 | ||
|
136 | If the blob exists locally, the path may point to either the usercache | |
|
137 | or the local store. If it doesn't, it will point to the local store. | |
|
138 | This is meant for situations where existing code that isn't LFS aware | |
|
139 | needs to open a blob. Generally, prefer the ``open`` method on this | |
|
140 | class. | |
|
141 | """ | |
|
147 | 142 | # The usercache is the most likely place to hold the file. Commit will |
|
148 | 143 | # write to both it and the local store, as will anything that downloads |
|
149 | 144 | # the blobs. However, things like clone without an update won't |
@@ -151,9 +146,9 b' class local(object):' | |||
|
151 | 146 | # the usercache is the only place it _could_ be. If not present, the |
|
152 | 147 | # missing file msg here will indicate the local repo, not the usercache. |
|
153 | 148 | if self.cachevfs.exists(oid): |
|
154 | return self.cachevfs(oid, b'rb') | |
|
149 | return self.cachevfs.join(oid) | |
|
155 | 150 | |
|
156 | return self.vfs(oid, b'rb') | |
|
151 | return self.vfs.join(oid) | |
|
157 | 152 | |
|
158 | 153 | def download(self, oid, src, content_length): |
|
159 | 154 | """Read the blob from the remote source in chunks, verify the content, |
@@ -495,15 +490,17 b' class _gitlfsremote(object):' | |||
|
495 | 490 | _(b'detected corrupt lfs object: %s') % oid, |
|
496 | 491 | hint=_(b'run hg verify'), |
|
497 | 492 | ) |
|
498 | request.data = filewithprogress(localstore.open(oid), None) | |
|
499 | request.get_method = lambda: r'PUT' | |
|
500 | request.add_header('Content-Type', 'application/octet-stream') | |
|
501 | request.add_header('Content-Length', len(request.data)) | |
|
502 | 493 | |
|
503 | 494 | for k, v in headers: |
|
504 | 495 | request.add_header(pycompat.strurl(k), pycompat.strurl(v)) |
|
505 | 496 | |
|
506 | 497 | try: |
|
498 | if action == b'upload': | |
|
499 | request.data = lfsuploadfile(self.ui, localstore.path(oid)) | |
|
500 | request.get_method = lambda: 'PUT' | |
|
501 | request.add_header('Content-Type', 'application/octet-stream') | |
|
502 | request.add_header('Content-Length', request.data.length) | |
|
503 | ||
|
507 | 504 | with contextlib.closing(self.urlopener.open(request)) as res: |
|
508 | 505 | contentlength = res.info().get(b"content-length") |
|
509 | 506 | ui = self.ui # Shorten debug lines |
@@ -545,6 +542,9 b' class _gitlfsremote(object):' | |||
|
545 | 542 | raise LfsRemoteError( |
|
546 | 543 | _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint |
|
547 | 544 | ) |
|
545 | finally: | |
|
546 | if request.data: | |
|
547 | request.data.close() | |
|
548 | 548 | |
|
549 | 549 | def _batch(self, pointers, localstore, action): |
|
550 | 550 | if action not in [b'upload', b'download']: |
@@ -59,6 +59,13 b' class processlogger(object):' | |||
|
59 | 59 | |
|
60 | 60 | def log(self, ui, event, msg, opts): |
|
61 | 61 | script = self._scripts[event] |
|
62 | maxmsg = 100000 | |
|
63 | if len(msg) > maxmsg: | |
|
64 | # Each env var has a 128KiB limit on linux. msg can be long, in | |
|
65 | # particular for command event, where it's the full command line. | |
|
66 | # Prefer truncating the message than raising "Argument list too | |
|
67 | # long" error. | |
|
68 | msg = msg[:maxmsg] + b' (truncated)' | |
|
62 | 69 | env = { |
|
63 | 70 | b'EVENT': event, |
|
64 | 71 | b'HGPID': os.getpid(), |
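The truncation guard above avoids "Argument list too long" (E2BIG) from exec when a hook's environment grows past the kernel's per-variable limit. The guard itself is tiny and easy to test in isolation (the 100000 limit comes from the patch):

    def truncate_for_env(msg, maxmsg=100000):
        if len(msg) > maxmsg:
            msg = msg[:maxmsg] + b' (truncated)'
        return msg

    assert truncate_for_env(b'short') == b'short'
    assert truncate_for_env(b'x' * 200001).endswith(b' (truncated)')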
@@ -858,7 +858,7 b' class queue(object):' | |||
|
858 | 858 | strip(self.ui, repo, [n], update=False, backup=False) |
|
859 | 859 | |
|
860 | 860 | ctx = repo[rev] |
|
861 | ret = hg.merge(repo, rev, remind=False) | |
|
861 | ret = hg.merge(ctx, remind=False) | |
|
862 | 862 | if ret: |
|
863 | 863 | raise error.Abort(_(b"update returned %d") % ret) |
|
864 | 864 | n = newcommit(repo, None, ctx.description(), ctx.user(), force=True) |
@@ -1162,7 +1162,7 b' class queue(object):' | |||
|
1162 | 1162 | |
|
1163 | 1163 | if unknown: |
|
1164 | 1164 | if numrevs: |
|
1165 | rev = dict((entry.name, entry.node) for entry in qfinished) | |
|
1165 | rev = {entry.name: entry.node for entry in qfinished} | |
|
1166 | 1166 | for p in unknown: |
|
1167 | 1167 | msg = _(b'revision %s refers to unknown patches: %s\n') |
|
1168 | 1168 | self.ui.warn(msg % (short(rev[p]), p)) |
@@ -3361,7 +3361,7 b' def guard(ui, repo, *args, **opts):' | |||
|
3361 | 3361 | ui.write(b'\n') |
|
3362 | 3362 | |
|
3363 | 3363 | q = repo.mq |
|
3364 | applied = set(p.name for p in q.applied) | |
|
3364 | applied = {p.name for p in q.applied} | |
|
3365 | 3365 | patch = None |
|
3366 | 3366 | args = list(args) |
|
3367 | 3367 | if opts.get('list'): |
@@ -133,6 +133,15 b' notify.fromauthor' | |||
|
133 | 133 | the "From" field of the notification mail. If not set, take the user |
|
134 | 134 | from the pushing repo. Default: False. |
|
135 | 135 | |
|
136 | notify.reply-to-predecessor (EXPERIMENTAL) | |
|
137 | If set and the changeset has a predecessor in the repository, try to thread | |
|
138 | the notification mail with the predecessor. This adds the "In-Reply-To" header | |
|
139 | to the notification mail with a reference to the predecessor with the smallest | |
|
140 | revision number. Mail threads can still be torn, especially when changesets | |
|
141 | are folded. | |
|
142 | ||
|
143 | This option must be used in combination with ``notify.messageidseed``. | |
|
144 | ||
|
136 | 145 | If set, the following entries will also be used to customize the |
|
137 | 146 | notifications: |
|
138 | 147 | |
@@ -160,6 +169,7 b' from mercurial import (' | |||
|
160 | 169 | error, |
|
161 | 170 | logcmdutil, |
|
162 | 171 | mail, |
|
172 | obsutil, | |
|
163 | 173 | patch, |
|
164 | 174 | pycompat, |
|
165 | 175 | registrar, |
@@ -219,6 +229,9 b' configitem(' | |||
|
219 | 229 | b'notify', b'outgoing', default=None, |
|
220 | 230 | ) |
|
221 | 231 | configitem( |
|
232 | b'notify', b'reply-to-predecessor', default=False, | |
|
233 | ) | |
|
234 | configitem( | |
|
222 | 235 | b'notify', b'sources', default=b'serve', |
|
223 | 236 | ) |
|
224 | 237 | configitem( |
@@ -281,6 +294,16 b' class notifier(object):' | |||
|
281 | 294 | self.merge = self.ui.configbool(b'notify', b'merge') |
|
282 | 295 | self.showfunc = self.ui.configbool(b'notify', b'showfunc') |
|
283 | 296 | self.messageidseed = self.ui.config(b'notify', b'messageidseed') |
|
297 | self.reply = self.ui.configbool(b'notify', b'reply-to-predecessor') | |
|
298 | ||
|
299 | if self.reply and not self.messageidseed: | |
|
300 | raise error.Abort( | |
|
301 | _( | |
|
302 | b'notify.reply-to-predecessor used without ' | |
|
303 | b'notify.messageidseed' | |
|
304 | ) | |
|
305 | ) | |
|
306 | ||
|
284 | 307 | if self.showfunc is None: |
|
285 | 308 | self.showfunc = self.ui.configbool(b'diff', b'showfunc') |
|
286 | 309 | |
@@ -437,6 +460,26 b' class notifier(object):' | |||
|
437 | 460 | msg['X-Hg-Notification'] = 'changeset %s' % ctx |
|
438 | 461 | if not msg['Message-Id']: |
|
439 | 462 | msg['Message-Id'] = messageid(ctx, self.domain, self.messageidseed) |
|
463 | if self.reply: | |
|
464 | unfi = self.repo.unfiltered() | |
|
465 | has_node = unfi.changelog.index.has_node | |
|
466 | predecessors = [ | |
|
467 | unfi[ctx2] | |
|
468 | for ctx2 in obsutil.allpredecessors(unfi.obsstore, [ctx.node()]) | |
|
469 | if ctx2 != ctx.node() and has_node(ctx2) | |
|
470 | ] | |
|
471 | if predecessors: | |
|
472 | # There is at least one predecessor, so which to pick? | |
|
473 | # Ideally, there is a unique root because changesets have | |
|
474 | # been evolved/rebased one step at a time. In this case, | |
|
475 | # just picking the oldest known changeset provides a stable | |
|
476 | # base. It doesn't help when changesets are folded. Any | |
|
477 | # better solution would require storing more information | |
|
478 | # in the repository. | |
|
479 | pred = min(predecessors, key=lambda ctx: ctx.rev()) | |
|
480 | msg['In-Reply-To'] = messageid( | |
|
481 | pred, self.domain, self.messageidseed | |
|
482 | ) | |
|
440 | 483 | msg['To'] = ', '.join(sorted(subs)) |
|
441 | 484 | |
|
442 | 485 | msgtext = msg.as_bytes() if pycompat.ispy3 else msg.as_string() |
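The reason reply-to-predecessor insists on notify.messageidseed: both the predecessor's Message-Id and the successor's In-Reply-To must be derived deterministically from the changeset, or the thread can never link up across separate runs. A hypothetical stand-in for the extension's messageid() showing the property (not the actual implementation):

    import hashlib

    def stable_messageid(node_hex, domain, seed):
        digest = hashlib.sha512(node_hex + seed).hexdigest()
        return '<hg.%s@%s>' % (digest[:64], domain)

    one = stable_messageid(b'a' * 40, 'example.com', b'seed')
    two = stable_messageid(b'a' * 40, 'example.com', b'seed')
    assert one == two  # same input, same id, so threading survives restarts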
@@ -54,13 +54,14 b' import mimetypes' | |||
|
54 | 54 | import operator |
|
55 | 55 | import re |
|
56 | 56 | |
|
57 | from mercurial.node import bin, nullid | |
|
57 | from mercurial.node import bin, nullid, short | |
|
58 | 58 | from mercurial.i18n import _ |
|
59 | 59 | from mercurial.pycompat import getattr |
|
60 | 60 | from mercurial.thirdparty import attr |
|
61 | 61 | from mercurial import ( |
|
62 | 62 | cmdutil, |
|
63 | 63 | context, |
|
64 | copies, | |
|
64 | 65 | encoding, |
|
65 | 66 | error, |
|
66 | 67 | exthelper, |
@@ -114,6 +115,10 b' eh.configitem(' | |||
|
114 | 115 | eh.configitem( |
|
115 | 116 | b'phabricator', b'curlcmd', default=None, |
|
116 | 117 | ) |
|
118 | # developer config: phabricator.debug | |
|
119 | eh.configitem( | |
|
120 | b'phabricator', b'debug', default=False, | |
|
121 | ) | |
|
117 | 122 | # developer config: phabricator.repophid |
|
118 | 123 | eh.configitem( |
|
119 | 124 | b'phabricator', b'repophid', default=None, |
@@ -124,6 +129,12 b' eh.configitem(' | |||
|
124 | 129 | eh.configitem( |
|
125 | 130 | b'phabsend', b'confirm', default=False, |
|
126 | 131 | ) |
|
132 | eh.configitem( | |
|
133 | b'phabimport', b'secret', default=False, | |
|
134 | ) | |
|
135 | eh.configitem( | |
|
136 | b'phabimport', b'obsolete', default=False, | |
|
137 | ) | |
|
127 | 138 | |
|
128 | 139 | colortable = { |
|
129 | 140 | b'phabricator.action.created': b'green', |
@@ -257,19 +268,36 b' def vcrcommand(name, flags, spec, helpca' | |||
|
257 | 268 | return fn(*args, **kwargs) |
|
258 | 269 | return fn(*args, **kwargs) |
|
259 | 270 | |
|
260 | inner.__name__ = fn.__name__ | |
|
261 | inner.__doc__ = fn.__doc__ | |
|
271 | cmd = util.checksignature(inner, depth=2) | |
|
272 | cmd.__name__ = fn.__name__ | |
|
273 | cmd.__doc__ = fn.__doc__ | |
|
274 | ||
|
262 | 275 | return command( |
|
263 | 276 | name, |
|
264 | 277 | fullflags, |
|
265 | 278 | spec, |
|
266 | 279 | helpcategory=helpcategory, |
|
267 | 280 | optionalrepo=optionalrepo, |
|
268 | )(inner) | |
|
281 | )(cmd) | |
|
269 | 282 | |
|
270 | 283 | return decorate |
|
271 | 284 | |
|
272 | 285 | |
|
286 | def _debug(ui, *msg, **opts): | |
|
287 | """write debug output for Phabricator if ``phabricator.debug`` is set | |
|
288 | ||
|
289 | Specifically, this avoids dumping Conduit and HTTP auth chatter that is | |
|
290 | printed with the --debug argument. | |
|
291 | """ | |
|
292 | if ui.configbool(b"phabricator", b"debug"): | |
|
293 | flag = ui.debugflag | |
|
294 | try: | |
|
295 | ui.debugflag = True | |
|
296 | ui.write(*msg, **opts) | |
|
297 | finally: | |
|
298 | ui.debugflag = flag | |
|
299 | ||
|
300 | ||
|
273 | 301 | def urlencodenested(params): |
|
274 | 302 | """like urlencode, but works with nested parameters. |
|
275 | 303 | |
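_debug() above briefly forces ui.debugflag on and restores it in a finally block, so an exception inside ui.write() cannot leave debug output enabled globally. The same save/set/restore shape as a reusable context manager (a sketch; the extension keeps it inline):

    import contextlib

    @contextlib.contextmanager
    def forced_attr(obj, name, value):
        saved = getattr(obj, name)
        setattr(obj, name, value)
        try:
            yield
        finally:
            setattr(obj, name, saved)  # restore even on error

    class FakeUI(object):
        debugflag = False

    ui = FakeUI()
    with forced_attr(ui, 'debugflag', True):
        assert ui.debugflag
    assert not ui.debugflag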
@@ -446,7 +474,8 b' def getoldnodedrevmap(repo, nodelist):' | |||
|
446 | 474 | has_node = unfi.changelog.index.has_node |
|
447 | 475 | |
|
448 | 476 | result = {} # {node: (oldnode?, lastdiff?, drev)} |
|
449 | toconfirm = {} # {node: (force, {precnode}, drev)} | |
|
477 | # ordered for test stability when printing new -> old mapping below | |
|
478 | toconfirm = util.sortdict() # {node: (force, {precnode}, drev)} | |
|
450 | 479 | for node in nodelist: |
|
451 | 480 | ctx = unfi[node] |
|
452 | 481 | # For tags like "D123", put them into "toconfirm" to verify later |
@@ -474,18 +503,23 b' def getoldnodedrevmap(repo, nodelist):' | |||
|
474 | 503 | alldiffs = callconduit( |
|
475 | 504 | unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs} |
|
476 | 505 | ) |
|
477 | getnode = lambda d: bin(getdiffmeta(d).get(b'node', b'')) or None | |
|
506 | ||
|
507 | def getnodes(d, precset): | |
|
508 | # Ignore other nodes that were combined into the Differential | |
|
509 | # that aren't predecessors of the current local node. | |
|
510 | return [n for n in getlocalcommits(d) if n in precset] | |
|
511 | ||
|
478 | 512 | for newnode, (force, precset, drev) in toconfirm.items(): |
|
479 | 513 | diffs = [ |
|
480 | 514 | d for d in alldiffs.values() if int(d[b'revisionID']) == drev |
|
481 | 515 | ] |
|
482 | 516 | |
|
483 | # "precursors" as known by Phabricator | |
|
484 | phprecset = set(getnode(d) for d in diffs) | |
|
517 | # local predecessors known by Phabricator | |
|
518 | phprecset = {n for d in diffs for n in getnodes(d, precset)} | |
|
485 | 519 | |
|
486 | 520 | # Ignore if precursors (Phabricator and local repo) do not overlap, |
|
487 | 521 | # and force is not set (when commit message says nothing) |
|
488 | if not force and not bool(phprecset & precset): | |
|
522 | if not force and not phprecset: | |
|
489 | 523 | tagname = b'D%d' % drev |
|
490 | 524 | tags.tag( |
|
491 | 525 | repo, |
@@ -510,7 +544,33 b' def getoldnodedrevmap(repo, nodelist):' | |||
|
510 | 544 | oldnode = lastdiff = None |
|
511 | 545 | if diffs: |
|
512 | 546 | lastdiff = max(diffs, key=lambda d: int(d[b'id'])) |
|
513 | oldnode = getnode(lastdiff) | |
|
547 | oldnodes = getnodes(lastdiff, precset) | |
|
548 | ||
|
549 | _debug( | |
|
550 | unfi.ui, | |
|
551 | b"%s mapped to old nodes %s\n" | |
|
552 | % ( | |
|
553 | short(newnode), | |
|
554 | stringutil.pprint([short(n) for n in sorted(oldnodes)]), | |
|
555 | ), | |
|
556 | ) | |
|
557 | ||
|
558 | # If this commit was the result of `hg fold` after submission, | |
|
559 | # and now resubmitted with --fold, the easiest thing to do is | |
|
560 | # to leave the node clear. This only results in creating a new | |
|
561 | # diff for the _same_ Differential Revision if this commit is | |
|
562 | # the first or last in the selected range. If we picked a node | |
|
563 | # from the list instead, it would have to be the lowest if at | |
|
564 | # the beginning of the --fold range, or the highest at the end. | |
|
565 | # Otherwise, one or more of the nodes wouldn't be considered in | |
|
566 | # the diff, and the Differential wouldn't be properly updated. | |
|
567 | # If this commit is the result of `hg split` in the same | |
|
568 | # scenario, there is a single oldnode here (and multiple | |
|
569 | # newnodes mapped to it). That makes it the same as the normal | |
|
570 | # case, as the edges of the newnode range cleanly maps to one | |
|
571 | # oldnode each. | |
|
572 | if len(oldnodes) == 1: | |
|
573 | oldnode = oldnodes[0] | |
|
514 | 574 | if oldnode and not has_node(oldnode): |
|
515 | 575 | oldnode = None |
|
516 | 576 | |
@@ -542,11 +602,11 b' def getdrevmap(repo, revs):' | |||
|
542 | 602 | return result |
|
543 | 603 | |
|
544 | 604 | |
|
545 | def getdiff(ctx, diffopts): | |
|
605 | def getdiff(basectx, ctx, diffopts): | |
|
546 | 606 | """plain-text diff without header (user, commit message, etc)""" |
|
547 | 607 | output = util.stringio() |
|
548 | 608 | for chunk, _label in patch.diffui( |
|
549 | ctx.repo(), ctx.p1().node(), ctx.node(), None, opts=diffopts | |
|
609 | ctx.repo(), basectx.p1().node(), ctx.node(), None, opts=diffopts | |
|
550 | 610 | ): |
|
551 | 611 | output.write(chunk) |
|
552 | 612 | return output.getvalue() |
@@ -653,13 +713,13 b' class phabdiff(object):' | |||
|
653 | 713 | ) |
|
654 | 714 | |
|
655 | 715 | |
|
656 | def maketext(pchange, ctx, fname): | |
|
716 | def maketext(pchange, basectx, ctx, fname): | |
|
657 | 717 | """populate the phabchange for a text file""" |
|
658 | 718 | repo = ctx.repo() |
|
659 | 719 | fmatcher = match.exact([fname]) |
|
660 | 720 | diffopts = mdiff.diffopts(git=True, context=32767) |
|
661 | 721 | _pfctx, _fctx, header, fhunks = next( |
|
662 | patch.diffhunks(repo, ctx.p1(), ctx, fmatcher, opts=diffopts) | |
|
722 | patch.diffhunks(repo, basectx.p1(), ctx, fmatcher, opts=diffopts) | |
|
663 | 723 | ) |
|
664 | 724 | |
|
665 | 725 | for fhunk in fhunks: |
@@ -747,12 +807,14 b' def uploadfile(fctx):' | |||
|
747 | 807 | return fphid |
|
748 | 808 | |
|
749 | 809 | |
|
750 | def addoldbinary(pchange, fctx): | |
|
810 | def addoldbinary(pchange, oldfctx, fctx): | |
|
751 | 811 | """add the metadata for the previous version of a binary file to the |
|
752 | 812 | phabchange for the new version |
|
813 | ||
|
814 | ``oldfctx`` is the previous version of the file; ``fctx`` is the new | |
|
815 | version of the file, or None if the file is being removed. | |
|
753 | 816 | """ |
|
754 | oldfctx = fctx.p1() | |
|
755 | if fctx.cmp(oldfctx): | |
|
817 | if not fctx or fctx.cmp(oldfctx): | |
|
756 | 818 | # Files differ, add the old one |
|
757 | 819 | pchange.metadata[b'old:file:size'] = oldfctx.size() |
|
758 | 820 | mimeguess, _enc = mimetypes.guess_type( |
@@ -794,8 +856,6 b' def notutf8(fctx):' | |||
|
794 | 856 | """ |
|
795 | 857 | try: |
|
796 | 858 | fctx.data().decode('utf-8') |
|
797 | if fctx.parents(): | |
|
798 | fctx.p1().data().decode('utf-8') | |
|
799 | 859 | return False |
|
800 | 860 | except UnicodeDecodeError: |
|
801 | 861 | fctx.repo().ui.write( |
@@ -805,56 +865,76 b' def notutf8(fctx):' | |||
|
805 | 865 | return True |
|
806 | 866 | |
|
807 | 867 | |
|
808 | def addremoved(pdiff, ctx, removed): | |
|
868 | def addremoved(pdiff, basectx, ctx, removed): | |
|
809 | 869 | """add removed files to the phabdiff. Shouldn't include moves""" |
|
810 | 870 | for fname in removed: |
|
811 | 871 | pchange = phabchange( |
|
812 | 872 | currentPath=fname, oldPath=fname, type=DiffChangeType.DELETE |
|
813 | 873 | ) |
|
814 | pchange.addoldmode(gitmode[ctx.p1()[fname].flags()]) | |
|
815 | fctx = ctx.p1()[fname] | |
|
816 | if not (fctx.isbinary() or notutf8(fctx)): | |
|
817 | maketext(pchange, ctx, fname) | |
|
874 | oldfctx = basectx.p1()[fname] | |
|
875 | pchange.addoldmode(gitmode[oldfctx.flags()]) | |
|
876 | if not (oldfctx.isbinary() or notutf8(oldfctx)): | |
|
877 | maketext(pchange, basectx, ctx, fname) | |
|
818 | 878 | |
|
819 | 879 | pdiff.addchange(pchange) |
|
820 | 880 | |
|
821 | 881 | |
|
822 | def addmodified(pdiff, ctx, modified): | |
|
882 | def addmodified(pdiff, basectx, ctx, modified): | |
|
823 | 883 | """add modified files to the phabdiff""" |
|
824 | 884 | for fname in modified: |
|
825 | 885 | fctx = ctx[fname] |
|
886 | oldfctx = basectx.p1()[fname] | |
|
826 | 887 | pchange = phabchange(currentPath=fname, oldPath=fname) |
|
827 | filemode = gitmode[ctx[fname].flags()] | |
|
828 | originalmode = gitmode[ctx.p1()[fname].flags()] | |
|
888 | filemode = gitmode[fctx.flags()] | |
|
889 | originalmode = gitmode[oldfctx.flags()] | |
|
829 | 890 | if filemode != originalmode: |
|
830 | 891 | pchange.addoldmode(originalmode) |
|
831 | 892 | pchange.addnewmode(filemode) |
|
832 | 893 | |
|
833 | if fctx.isbinary() or notutf8(fctx): | |
|
894 | if ( | |
|
895 | fctx.isbinary() | |
|
896 | or notutf8(fctx) | |
|
897 | or oldfctx.isbinary() | |
|
898 | or notutf8(oldfctx) | |
|
899 | ): | |
|
834 | 900 | makebinary(pchange, fctx) |
|
835 | addoldbinary(pchange, fctx) | |
|
901 | addoldbinary(pchange, oldfctx, fctx) | |
|
836 | 902 | else: |
|
837 | maketext(pchange, ctx, fname) | |
|
903 | maketext(pchange, basectx, ctx, fname) | |
|
838 | 904 | |
|
839 | 905 | pdiff.addchange(pchange) |
|
840 | 906 | |
|
841 | 907 | |
|
842 | def addadded(pdiff, ctx, added, removed): | |
|
908 | def addadded(pdiff, basectx, ctx, added, removed): | |
|
843 | 909 | """add file adds to the phabdiff, both new files and copies/moves""" |
|
844 | 910 | # Keep track of files that've been recorded as moved/copied, so if there are |
|
845 | 911 | # additional copies we can mark them (moves get removed from removed) |
|
846 | 912 | copiedchanges = {} |
|
847 | 913 | movedchanges = {} |
|
914 | ||
|
915 | copy = {} | |
|
916 | if basectx != ctx: | |
|
917 | copy = copies.pathcopies(basectx.p1(), ctx) | |
|
918 | ||
|
848 | 919 | for fname in added: |
|
849 | 920 | fctx = ctx[fname] |
|
921 | oldfctx = None | |
|
850 | 922 | pchange = phabchange(currentPath=fname) |
|
851 | 923 | |
|
852 | filemode = gitmode[ctx[fname].flags()] | |
|
853 | renamed = fctx.renamed() | |
|
924 | filemode = gitmode[fctx.flags()] | |
|
925 | ||
|
926 | if copy: | |
|
927 | originalfname = copy.get(fname, fname) | |
|
928 | else: | |
|
929 | originalfname = fname | |
|
930 | if fctx.renamed(): | |
|
931 | originalfname = fctx.renamed()[0] | |
|
932 | ||
|
933 | renamed = fname != originalfname | |
|
854 | 934 | |
|
855 | 935 | if renamed: |
|
856 | originalfname = renamed[0] | |
|
857 | originalmode = gitmode[ctx.p1()[originalfname].flags()] | |
|
936 | oldfctx = basectx.p1()[originalfname] | |
|
937 | originalmode = gitmode[oldfctx.flags()] | |
|
858 | 938 | pchange.oldPath = originalfname |
|
859 | 939 | |
|
860 | 940 | if originalfname in removed: |
@@ -889,12 +969,16 b' def addadded(pdiff, ctx, added, removed)' | |||
|
889 | 969 | pchange.addnewmode(gitmode[fctx.flags()]) |
|
890 | 970 | pchange.type = DiffChangeType.ADD |
|
891 | 971 | |
|
892 | if fctx.isbinary() or notutf8(fctx): | |
|
972 | if ( | |
|
973 | fctx.isbinary() | |
|
974 | or notutf8(fctx) | |
|
975 | or (oldfctx and (oldfctx.isbinary() or notutf8(oldfctx))) | |
|
976 | ): | |
|
893 | 977 | makebinary(pchange, fctx) |
|
894 | 978 | if renamed: |
|
895 | addoldbinary(pchange, fctx) | |
|
979 | addoldbinary(pchange, oldfctx, fctx) | |
|
896 | 980 | else: |
|
897 | maketext(pchange, ctx, fname) | |
|
981 | maketext(pchange, basectx, ctx, fname) | |
|
898 | 982 | |
|
899 | 983 | pdiff.addchange(pchange) |
|
900 | 984 | |
@@ -904,21 +988,21 b' def addadded(pdiff, ctx, added, removed)' | |||
|
904 | 988 | pdiff.addchange(movedchange) |
|
905 | 989 | |
|
906 | 990 | |
|
907 | def creatediff(ctx): | |
|
991 | def creatediff(basectx, ctx): | |
|
908 | 992 | """create a Differential Diff""" |
|
909 | 993 | repo = ctx.repo() |
|
910 | 994 | repophid = getrepophid(repo) |
|
911 | 995 | # Create a "Differential Diff" via "differential.creatediff" API |
|
912 | 996 | pdiff = phabdiff( |
|
913 | sourceControlBaseRevision=b'%s' % ctx.p1().hex(), | |
|
997 | sourceControlBaseRevision=b'%s' % basectx.p1().hex(), | |
|
914 | 998 | branch=b'%s' % ctx.branch(), |
|
915 | 999 | ) |
|
916 | modified, added, removed, _d, _u, _i, _c = ctx.p1().status(ctx) | |
|
1000 | modified, added, removed, _d, _u, _i, _c = basectx.p1().status(ctx) | |
|
917 | 1001 | # addadded will remove moved files from removed, so addremoved won't get |
|
918 | 1002 | # them |
|
919 | addadded(pdiff, ctx, added, removed) | |
|
920 | addmodified(pdiff, ctx, modified) | |
|
921 | addremoved(pdiff, ctx, removed) | |
|
1003 | addadded(pdiff, basectx, ctx, added, removed) | |
|
1004 | addmodified(pdiff, basectx, ctx, modified) | |
|
1005 | addremoved(pdiff, basectx, ctx, removed) | |
|
922 | 1006 | if repophid: |
|
923 | 1007 | pdiff.repositoryPHID = repophid |
|
924 | 1008 | diff = callconduit( |
@@ -927,52 +1011,64 b' def creatediff(ctx):' | |||
|
927 | 1011 | pycompat.byteskwargs(attr.asdict(pdiff)), |
|
928 | 1012 | ) |
|
929 | 1013 | if not diff: |
|
930 | raise error.Abort(_(b'cannot create diff for %s') % ctx) | |
|
1014 | if basectx != ctx: | |
|
1015 | msg = _(b'cannot create diff for %s::%s') % (basectx, ctx) | |
|
1016 | else: | |
|
1017 | msg = _(b'cannot create diff for %s') % ctx | |
|
1018 | raise error.Abort(msg) | |
|
931 | 1019 | return diff |
|
932 | 1020 | |
|
933 | 1021 | |
|
934 | def writediffproperties(ctx, diff): | |
|
935 | """write metadata to diff so patches could be applied losslessly""" | 

1022 | def writediffproperties(ctxs, diff): | |
|
1023 | """write metadata to diff so patches could be applied losslessly | |
|
1024 | ||
|
1025 | ``ctxs`` is the list of commits that created the diff, in ascending order. | |
|
1026 | The list is generally a single commit, but may be several when using | |
|
1027 | ``phabsend --fold``. | |
|
1028 | """ | |
|
936 | 1029 | # creatediff returns with a diffid but query returns with an id |
|
937 | 1030 | diffid = diff.get(b'diffid', diff.get(b'id')) |
|
1031 | basectx = ctxs[0] | |
|
1032 | tipctx = ctxs[-1] | |
|
1033 | ||
|
938 | 1034 | params = { |
|
939 | 1035 | b'diff_id': diffid, |
|
940 | 1036 | b'name': b'hg:meta', |
|
941 | 1037 | b'data': templatefilters.json( |
|
942 | 1038 | { |
|
943 | b'user': ctx.user(), | |
|
944 | b'date': b'%d %d' % ctx.date(), | |
|
945 | b'branch': ctx.branch(), | |
|
946 | b'node': ctx.hex(), | |
|
947 | b'parent': ctx.p1().hex(), | |
|
1039 | b'user': tipctx.user(), | |
|
1040 | b'date': b'%d %d' % tipctx.date(), | |
|
1041 | b'branch': tipctx.branch(), | |
|
1042 | b'node': tipctx.hex(), | |
|
1043 | b'parent': basectx.p1().hex(), | |
|
948 | 1044 | } |
|
949 | 1045 | ), |
|
950 | 1046 | } |
|
951 | callconduit(ctx.repo().ui, b'differential.setdiffproperty', params) | |
|
1047 | callconduit(basectx.repo().ui, b'differential.setdiffproperty', params) | |
|
952 | 1048 | |
|
953 | params = { | 

954 | b'diff_id': diffid, | |
|
955 | b'name': b'local:commits', | |
|
956 | b'data': templatefilters.json( | |
|
957 | { | |
|
958 | ctx.hex(): { | |
|
1049 | commits = {} | |
|
1050 | for ctx in ctxs: | |
|
1051 | commits[ctx.hex()] = { | |
|
959 | 1052 | b'author': stringutil.person(ctx.user()), |

960 | 1053 | b'authorEmail': stringutil.email(ctx.user()), |

961 | 1054 | b'time': int(ctx.date()[0]), |

962 | 1055 | b'branch': ctx.branch(), |

963 | 1056 | b'node': ctx.hex(), |

964 | 1057 | b'parents': [ctx.p1().hex(), ctx.p2().hex()], |

965 | }, | |
|
966 | 1058 | } |

967 | ), | |
|
1059 | params = { | |
|
1060 | b'diff_id': diffid, | |
|
1061 | b'name': b'local:commits', | |
|
1062 | b'data': templatefilters.json(commits), | |
|
968 | 1063 | } |
|
969 | callconduit(ctx.repo().ui, b'differential.setdiffproperty', params) | |
|
1064 | callconduit(basectx.repo().ui, b'differential.setdiffproperty', params) | |
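
As a rough sketch of the two payloads assembled above, using plain dicts
and strings in place of change contexts and byte strings (the commit dict
keys are hypothetical stand-ins for changectx methods; the real code
serializes through templatefilters.json):

    def build_diff_properties(commits):
        """commits is a list of dicts, oldest first. hg:meta describes
        the posted span as a whole; local:commits keeps one entry per
        commit so no folded node loses its association."""
        base, tip = commits[0], commits[-1]
        hgmeta = {
            'user': tip['user'],
            'date': tip['date'],
            'branch': tip['branch'],
            'node': tip['node'],
            'parent': base['p1'],  # parent of the *first* commit
        }
        localcommits = {
            c['node']: {'author': c['user'], 'branch': c['branch'],
                        'node': c['node'], 'parents': [c['p1']]}
            for c in commits
        }
        return hgmeta, localcommits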
|
970 | 1065 | |
|
971 | 1066 | |
|
972 | 1067 | def createdifferentialrevision( |
|
973 | ctx, | |
|
1068 | ctxs, | |
|
974 | 1069 | revid=None, |
|
975 | 1070 | parentrevphid=None, |
|
1071 | oldbasenode=None, | |
|
976 | 1072 | oldnode=None, |
|
977 | 1073 | olddiff=None, |
|
978 | 1074 | actions=None, |
@@ -983,22 +1079,38 b' def createdifferentialrevision(' | |||
|
983 | 1079 | If revid is None, create a new Differential Revision, otherwise update |
|
984 | 1080 | revid. If parentrevphid is not None, set it as a dependency. |
|
985 | 1081 | |
|
1082 | If there is a single commit for the new Differential Revision, ``ctxs`` will | |
|
1083 | be a list of that single context. Otherwise, it is a list that covers the | |
|
1084 | range of changes for the differential, where ``ctxs[0]`` is the first change | |
|
1085 | to include and ``ctxs[-1]`` is the last. | |
|
1086 | ||
|
986 | 1087 | If oldnode is not None, check if the patch content (without commit message |
|
987 | and metadata) has changed before creating another diff. | |
|
1088 | and metadata) has changed before creating another diff. For a Revision with | |
|
1089 | a single commit, ``oldbasenode`` and ``oldnode`` have the same value. For a | |
|
1090 | Revision covering multiple commits, ``oldbasenode`` corresponds to | |
|
1091 | ``ctxs[0]`` the previous time this Revision was posted, and ``oldnode`` | |
|
1092 | corresponds to ``ctxs[-1]``. | |
|
988 | 1093 | |
|
989 | 1094 | If actions is not None, they will be appended to the transaction. |
|
990 | 1095 | """ |
|
1096 | ctx = ctxs[-1] | |
|
1097 | basectx = ctxs[0] | |
|
1098 | ||
|
991 | 1099 | repo = ctx.repo() |
|
992 | 1100 | if oldnode: |
|
993 | 1101 | diffopts = mdiff.diffopts(git=True, context=32767) |
|
994 | oldctx = repo.unfiltered()[oldnode] | 

995 | neednewdiff = getdiff(ctx, diffopts) != getdiff(oldctx, diffopts) | |
|
1102 | unfi = repo.unfiltered() | |
|
1103 | oldctx = unfi[oldnode] | |
|
1104 | oldbasectx = unfi[oldbasenode] | |
|
1105 | neednewdiff = getdiff(basectx, ctx, diffopts) != getdiff( | |
|
1106 | oldbasectx, oldctx, diffopts | |
|
1107 | ) | |
|
996 | 1108 | else: |
|
997 | 1109 | neednewdiff = True |
|
998 | 1110 | |
|
999 | 1111 | transactions = [] |
|
1000 | 1112 | if neednewdiff: |
|
1001 | diff = creatediff(ctx) | |
|
1113 | diff = creatediff(basectx, ctx) | |
|
1002 | 1114 | transactions.append({b'type': b'update', b'value': diff[b'phid']}) |
|
1003 | 1115 | if comment: |
|
1004 | 1116 | transactions.append({b'type': b'comment', b'value': comment}) |
@@ -1008,7 +1120,7 b' def createdifferentialrevision(' | |||
|
1008 | 1120 | # pushers could know the correct node metadata. |
|
1009 | 1121 | assert olddiff |
|
1010 | 1122 | diff = olddiff |
|
1011 | writediffproperties(ctx, diff) | |
|
1123 | writediffproperties(ctxs, diff) | |
|
1012 | 1124 | |
|
1013 | 1125 | # Set the parent Revision every time, so commit re-ordering is picked-up |
|
1014 | 1126 | if parentrevphid: |
@@ -1019,13 +1131,41 b' def createdifferentialrevision(' | |||
|
1019 | 1131 | if actions: |
|
1020 | 1132 | transactions += actions |
|
1021 | 1133 | |
|
1134 | # When folding multiple local commits into a single review, arcanist will | |
|
1135 | # take the summary line of the first commit as the title, and then | |
|
1136 | # concatenate the rest of the remaining messages (including each of their | |
|
1137 | # first lines) to the rest of the first commit message (each separated by | |
|
1138 | # an empty line), and use that as the summary field. Do the same here. | |
|
1139 | # For commits with only a one line message, there is no summary field, as | |
|
1140 | # this gets assigned to the title. | |
|
1141 | fields = util.sortdict() # sorted for stable wire protocol in tests | |
|
1142 | ||
|
1143 | for i, _ctx in enumerate(ctxs): | |
|
1022 | 1144 | # Parse commit message and update related fields. |
|
1023 | desc = ctx.description() | |
|
1145 | desc = _ctx.description() | |
|
1024 | 1146 | info = callconduit( |
|
1025 | 1147 | repo.ui, b'differential.parsecommitmessage', {b'corpus': desc} |
|
1026 | 1148 | ) |
|
1027 | for k, v in info[b'fields'].items(): | |
|
1028 | if k in [b'title', b'summary', b'testPlan']: | 

1149 | ||
|
1150 | for k in [b'title', b'summary', b'testPlan']: | |
|
1151 | v = info[b'fields'].get(k) | |
|
1152 | if not v: | |
|
1153 | continue | |
|
1154 | ||
|
1155 | if i == 0: | |
|
1156 | # Title, summary and test plan (if present) are taken verbatim | |
|
1157 | # for the first commit. | |
|
1158 | fields[k] = v.rstrip() | |
|
1159 | continue | |
|
1160 | elif k == b'title': | |
|
1161 | # Add subsequent titles (i.e. the first line of the commit | |
|
1162 | # message) back to the summary. | |
|
1163 | k = b'summary' | |
|
1164 | ||
|
1165 | # Append any current field to the existing composite field | |
|
1166 | fields[k] = b'\n\n'.join(filter(None, [fields.get(k), v.rstrip()])) | |
|
1167 | ||
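
The concatenation rule described in the comment above can be sketched in
isolation. This approximates the Conduit differential.parsecommitmessage
split with a simple first-line/body partition, so it illustrates the
folding rule only, not the wire protocol:

    def fold_messages(descriptions):
        """First commit contributes title and summary verbatim; every
        later commit's title (its first line) and body are appended to
        the summary, each separated by a blank line."""
        fields = {}
        for i, desc in enumerate(descriptions):
            title, _, body = desc.partition('\n\n')
            for k, v in [('title', title.strip()), ('summary', body.strip())]:
                if not v:
                    continue
                if i == 0:
                    fields[k] = v
                    continue
                if k == 'title':
                    k = 'summary'
                fields[k] = '\n\n'.join(filter(None, [fields.get(k), v]))
        return fields

    msgs = ['commit 1\n\ndetails 1', 'commit 2', 'commit 3\n\ndetails 3']
    folded = fold_messages(msgs)
    assert folded['title'] == 'commit 1'
    assert folded['summary'] == 'details 1\n\ncommit 2\n\ncommit 3\n\ndetails 3'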
|
1168 | for k, v in fields.items(): | |
|
1029 | 1169 | transactions.append({b'type': k, b'value': v}) |

1030 | 1170 | |
|
1031 | 1171 | params = {b'transactions': transactions} |
@@ -1035,20 +1175,24 b' def createdifferentialrevision(' | |||
|
1035 | 1175 | |
|
1036 | 1176 | revision = callconduit(repo.ui, b'differential.revision.edit', params) |
|
1037 | 1177 | if not revision: |
|
1038 | raise error.Abort(_(b'cannot create revision for %s') % ctx) | |
|
1178 | if len(ctxs) == 1: | |
|
1179 | msg = _(b'cannot create revision for %s') % ctx | |
|
1180 | else: | |
|
1181 | msg = _(b'cannot create revision for %s::%s') % (basectx, ctx) | |
|
1182 | raise error.Abort(msg) | |
|
1039 | 1183 | |
|
1040 | 1184 | return revision, diff |
|
1041 | 1185 | |
|
1042 | 1186 | |
|
1043 | def userphids(repo, names): | 

1187 | def userphids(ui, names): | |
|
1044 | 1188 | """convert user names to PHIDs""" |
|
1045 | 1189 | names = [name.lower() for name in names] |
|
1046 | 1190 | query = {b'constraints': {b'usernames': names}} |
|
1047 | result = callconduit(repo.ui, b'user.search', query) | 

1191 | result = callconduit(ui, b'user.search', query) | |
|
1048 | 1192 | # username not found is not an error of the API. So check if we have missed |
|
1049 | 1193 | # some names here. |
|
1050 | 1194 | data = result[b'data'] |
|
1051 | resolved = set(entry[b'fields'][b'username'].lower() for entry in data) | 

1195 | resolved = {entry[b'fields'][b'username'].lower() for entry in data} | |
|
1052 | 1196 | unresolved = set(names) - resolved |
|
1053 | 1197 | if unresolved: |
|
1054 | 1198 | raise error.Abort( |
@@ -1057,6 +1201,45 b' def userphids(repo, names):' | |||
|
1057 | 1201 | return [entry[b'phid'] for entry in data] |
|
1058 | 1202 | |
|
1059 | 1203 | |
|
1204 | def _print_phabsend_action(ui, ctx, newrevid, action): | |
|
1205 | """print the ``action`` that occurred when posting ``ctx`` for review | |
|
1206 | ||
|
1207 | This is a utility function for the sending phase of ``phabsend``, which | |
|
1208 | makes it easier to show a status for all local commits with `--fold``. | |
|
1209 | """ | |
|
1210 | actiondesc = ui.label( | |
|
1211 | { | |
|
1212 | b'created': _(b'created'), | |
|
1213 | b'skipped': _(b'skipped'), | |
|
1214 | b'updated': _(b'updated'), | |
|
1215 | }[action], | |
|
1216 | b'phabricator.action.%s' % action, | |
|
1217 | ) | |
|
1218 | drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev') | |
|
1219 | nodedesc = ui.label(bytes(ctx), b'phabricator.node') | |
|
1220 | desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc') | |
|
1221 | ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, desc)) | |
|
1222 | ||
|
1223 | ||
|
1224 | def _amend_diff_properties(unfi, drevid, newnodes, diff): | |
|
1225 | """update the local commit list for the ``diff`` associated with ``drevid`` | |
|
1226 | ||
|
1227 | This is a utility function for the amend phase of ``phabsend``, which | |
|
1228 | converts failures to warning messages. | |
|
1229 | """ | |
|
1230 | _debug( | |
|
1231 | unfi.ui, | |
|
1232 | b"new commits: %s\n" % stringutil.pprint([short(n) for n in newnodes]), | |
|
1233 | ) | |
|
1234 | ||
|
1235 | try: | |
|
1236 | writediffproperties([unfi[newnode] for newnode in newnodes], diff) | |
|
1237 | except util.urlerr.urlerror: | |
|
1238 | # If it fails just warn and keep going, otherwise the DREV | |
|
1239 | # associations will be lost | |
|
1240 | unfi.ui.warnnoi18n(b'Failed to update metadata for D%d\n' % drevid) | |
|
1241 | ||
|
1242 | ||
|
1060 | 1243 | @vcrcommand( |
|
1061 | 1244 | b'phabsend', |
|
1062 | 1245 | [ |
@@ -1071,6 +1254,7 b' def userphids(repo, names):' | |||
|
1071 | 1254 | _(b'add a comment to Revisions with new/updated Diffs'), |
|
1072 | 1255 | ), |
|
1073 | 1256 | (b'', b'confirm', None, _(b'ask for confirmation before sending')), |
|
1257 | (b'', b'fold', False, _(b'combine the revisions into one review')), | |
|
1074 | 1258 | ], |
|
1075 | 1259 | _(b'REV [OPTIONS]'), |
|
1076 | 1260 | helpcategory=command.CATEGORY_IMPORT_EXPORT, |
@@ -1099,6 +1283,12 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
1099 | 1283 | [phabsend] |
|
1100 | 1284 | confirm = true |
|
1101 | 1285 | |
|
1286 | By default, a separate review will be created for each commit that is | |
|
1287 | selected, and will have the same parent/child relationship in Phabricator. | |
|
1288 | If ``--fold`` is set, multiple commits are rolled up into a single review | |
|
1289 | as if diffed from the parent of the first revision to the last. The commit | |
|
1290 | messages are concatenated in the summary field on Phabricator. | |
|
1291 | ||
|
1102 | 1292 | phabsend will check obsstore and the above association to decide whether to |
|
1103 | 1293 | update an existing Differential Revision, or create a new one. |
|
1104 | 1294 | """ |
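
A hedged usage example (revision numbers, hashes, and the D-number are
all illustrative): posting a three-commit series as one review, with
--amend rewriting the local messages to carry the assigned URL:

    $ hg phabsend --fold --amend -r 10::12
    D123 - created - 1a2b3c4d5e6f: commit one
    D123 - created - 2b3c4d5e6f7a: commit two
    D123 - created - 3c4d5e6f7a8b: commit three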
@@ -1112,6 +1302,47 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
1112 | 1302 | if opts.get(b'amend'): |
|
1113 | 1303 | cmdutil.checkunfinished(repo) |
|
1114 | 1304 | |
|
1305 | ctxs = [repo[rev] for rev in revs] | |
|
1306 | ||
|
1307 | if any(c for c in ctxs if c.obsolete()): | |
|
1308 | raise error.Abort(_(b"obsolete commits cannot be posted for review")) | |
|
1309 | ||
|
1310 | fold = opts.get(b'fold') | |
|
1311 | if fold: | |
|
1312 | if len(revs) == 1: | |
|
1313 | # TODO: just switch to --no-fold instead? | |
|
1314 | raise error.Abort(_(b"cannot fold a single revision")) | |
|
1315 | ||
|
1316 | # There's no clear way to manage multiple commits with a Dxxx tag, so | |
|
1317 | # require the amend option. (We could append "_nnn", but then it | |
|
1318 | # becomes jumbled if earlier commits are added to an update.) It should | |
|
1319 | # lock the repo and ensure that the range is editable, but that would | |
|
1320 | # make the code pretty convoluted. The default behavior of `arc` is to | |
|
1321 | # create a new review anyway. | |
|
1322 | if not opts.get(b"amend"): | |
|
1323 | raise error.Abort(_(b"cannot fold with --no-amend")) | |
|
1324 | ||
|
1325 | # Ensure the local commits are an unbroken range | |
|
1326 | revrange = repo.revs(b'(first(%ld)::last(%ld))', revs, revs) | |
|
1327 | if any(r for r in revs if r not in revrange) or any( | |
|
1328 | r for r in revrange if r not in revs | |
|
1329 | ): | |
|
1330 | raise error.Abort(_(b"cannot fold non-linear revisions")) | |
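
The two any() scans above amount to a set equality test between the
selection and first::last of that selection. A minimal sketch over
plain integers (names hypothetical):

    def is_foldable(revs, revrange):
        """revrange stands for repo.revs('first(revs)::last(revs)');
        folding is allowed only when the two agree exactly, with no
        gaps inside the range and no strays outside it."""
        return set(revs) == set(revrange)

    # 5 and 7 with 6 unselected: 6 is in first::last, so reject.
    assert not is_foldable([5, 7], [5, 6, 7])
    assert is_foldable([5, 6, 7], [5, 6, 7])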
|
1331 | ||
|
1332 | # It might be possible to bucketize the revisions by the DREV value, and | |
|
1333 | # iterate over those groups when posting, and then again when amending. | |
|
1334 | # But for simplicity, require all selected revisions to be for the same | |
|
1335 | # DREV (if present). Adding local revisions to an existing DREV is | |
|
1336 | # acceptable. | |
|
1337 | drevmatchers = [ | |
|
1338 | _differentialrevisiondescre.search(ctx.description()) | |
|
1339 | for ctx in ctxs | |
|
1340 | ] | |
|
1341 | if len({m.group('url') for m in drevmatchers if m}) > 1: | |
|
1342 | raise error.Abort( | |
|
1343 | _(b"cannot fold revisions with different DREV values") | |
|
1344 | ) | |
|
1345 | ||
|
1115 | 1346 | # {newnode: (oldnode, olddiff, olddrev)} |
|
1116 | 1347 | oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs]) |
|
1117 | 1348 | |
@@ -1127,10 +1358,13 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
1127 | 1358 | blockers = opts.get(b'blocker', []) |
|
1128 | 1359 | phids = [] |
|
1129 | 1360 | if reviewers: |
|
1130 | phids.extend(userphids(repo, reviewers)) | |
|
1361 | phids.extend(userphids(repo.ui, reviewers)) | |
|
1131 | 1362 | if blockers: |
|
1132 | 1363 | phids.extend( |
|
1133 | map(lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers)) | |
|
1364 | map( | |
|
1365 | lambda phid: b'blocking(%s)' % phid, | |
|
1366 | userphids(repo.ui, blockers), | |
|
1367 | ) | |
|
1134 | 1368 | ) |
|
1135 | 1369 | if phids: |
|
1136 | 1370 | actions.append({b'type': b'reviewers.add', b'value': phids}) |
@@ -1141,24 +1375,40 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
1141 | 1375 | # Send patches one by one so we know their Differential Revision PHIDs and |
|
1142 | 1376 | # can provide dependency relationship |
|
1143 | 1377 | lastrevphid = None |
|
1144 | for rev in revs: | 

1145 | ui.debug(b'sending rev %d\n' % rev) | |
|
1146 | ctx = repo[rev] | |
|
1378 | for ctx in ctxs: | |
|
1379 | if fold: | |
|
1380 | ui.debug(b'sending rev %d::%d\n' % (ctx.rev(), ctxs[-1].rev())) | |
|
1381 | else: | |
|
1382 | ui.debug(b'sending rev %d\n' % ctx.rev()) | |
|
1147 | 1383 | |
|
1148 | 1384 | # Get Differential Revision ID |
|
1149 | 1385 | oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None)) |
|
1386 | oldbasenode, oldbasediff, oldbaserevid = oldnode, olddiff, revid | |
|
1387 | ||
|
1388 | if fold: | |
|
1389 | oldbasenode, oldbasediff, oldbaserevid = oldmap.get( | |
|
1390 | ctxs[-1].node(), (None, None, None) | |
|
1391 | ) | |
|
1392 | ||
|
1150 | 1393 | if oldnode != ctx.node() or opts.get(b'amend'): |
|
1151 | 1394 | # Create or update Differential Revision |
|
1152 | 1395 | revision, diff = createdifferentialrevision( |
|
1153 | ctx, | |
|
1396 | ctxs if fold else [ctx], | |
|
1154 | 1397 | revid, |
|
1155 | 1398 | lastrevphid, |
|
1399 | oldbasenode, | |
|
1156 | 1400 | oldnode, |
|
1157 | 1401 | olddiff, |
|
1158 | 1402 | actions, |
|
1159 | 1403 | opts.get(b'comment'), |
|
1160 | 1404 | ) |
|
1405 | ||
|
1406 | if fold: | |
|
1407 | for ctx in ctxs: | |
|
1161 | 1408 | diffmap[ctx.node()] = diff |
|
1409 | else: | |
|
1410 | diffmap[ctx.node()] = diff | |
|
1411 | ||
|
1162 | 1412 | newrevid = int(revision[b'object'][b'id']) |
|
1163 | 1413 | newrevphid = revision[b'object'][b'phid'] |
|
1164 | 1414 | if revid: |
@@ -1168,6 +1418,7 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
1168 | 1418 | |
|
1169 | 1419 | # Create a local tag to note the association, if commit message |
|
1170 | 1420 | # does not have it already |
|
1421 | if not fold: | |
|
1171 | 1422 | m = _differentialrevisiondescre.search(ctx.description()) |
|
1172 | 1423 | if not m or int(m.group('id')) != newrevid: |
|
1173 | 1424 | tagname = b'D%d' % newrevid |
@@ -1183,41 +1434,59 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
1183 | 1434 | else: |
|
1184 | 1435 | # Nothing changed. But still set "newrevphid" so the next revision |
|
1185 | 1436 | # could depend on this one and "newrevid" for the summary line. |
|
1186 | newrevphid = querydrev(repo, b'%d' % revid)[0][b'phid'] | |
|
1437 | newrevphid = querydrev(repo.ui, b'%d' % revid)[0][b'phid'] | |
|
1187 | 1438 | newrevid = revid |
|
1188 | 1439 | action = b'skipped' |
|
1189 | 1440 | |
|
1190 | actiondesc = ui.label( | |
|
1191 | { | |
|
1192 | b'created': _(b'created'), | |
|
1193 | b'skipped': _(b'skipped'), | |
|
1194 | b'updated': _(b'updated'), | |
|
1195 | }[action], | |
|
1196 | b'phabricator.action.%s' % action, | |
|
1197 | ) | |
|
1198 | drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev') | |
|
1199 | nodedesc = ui.label(bytes(ctx), b'phabricator.node') | |
|
1200 | desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc') | |
|
1201 | ui.write( | |
|
1202 | _(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, desc) | |
|
1203 | ) | |
|
1204 | 1441 | drevids.append(newrevid) |
|
1205 | 1442 | lastrevphid = newrevphid |
|
1206 | 1443 | |
|
1444 | if fold: | |
|
1445 | for c in ctxs: | |
|
1446 | if oldmap.get(c.node(), (None, None, None))[2]: | |
|
1447 | action = b'updated' | |
|
1448 | else: | |
|
1449 | action = b'created' | |
|
1450 | _print_phabsend_action(ui, c, newrevid, action) | |
|
1451 | break | |
|
1452 | ||
|
1453 | _print_phabsend_action(ui, ctx, newrevid, action) | |
|
1454 | ||
|
1207 | 1455 | # Update commit messages and remove tags |
|
1208 | 1456 | if opts.get(b'amend'): |
|
1209 | 1457 | unfi = repo.unfiltered() |
|
1210 | 1458 | drevs = callconduit(ui, b'differential.query', {b'ids': drevids}) |
|
1211 | 1459 | with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'): |
|
1460 | # Eagerly evaluate commits to restabilize before creating new | |
|
1461 | # commits. The selected revisions are excluded because they are | |
|
1462 | # automatically restacked as part of the submission process. | |
|
1463 | restack = [ | |
|
1464 | c | |
|
1465 | for c in repo.set( | |
|
1466 | b"(%ld::) - (%ld) - unstable() - obsolete() - public()", | |
|
1467 | revs, | |
|
1468 | revs, | |
|
1469 | ) | |
|
1470 | ] | |
|
1212 | 1471 | wnode = unfi[b'.'].node() |
|
1213 | 1472 | mapping = {} # {oldnode: [newnode]} |
|
1473 | newnodes = [] | |
|
1474 | ||
|
1475 | drevid = drevids[0] | |
|
1476 | ||
|
1214 | 1477 | for i, rev in enumerate(revs): |
|
1215 | 1478 | old = unfi[rev] |
|
1479 | if not fold: | |
|
1216 | 1480 | drevid = drevids[i] |
|
1217 | 1481 | drev = [d for d in drevs if int(d[b'id']) == drevid][0] |
|
1218 | newdesc = getdescfromdrev(drev) | |
|
1482 | ||
|
1483 | newdesc = get_amended_desc(drev, old, fold) | |
|
1219 | 1484 | # Make sure commit message contain "Differential Revision" |
|
1220 | if old.description() != newdesc: | |
|
1485 | if ( | |
|
1486 | old.description() != newdesc | |
|
1487 | or old.p1().node() in mapping | |
|
1488 | or old.p2().node() in mapping | |
|
1489 | ): | |
|
1221 | 1490 | if old.phase() == phases.public: |
|
1222 | 1491 | ui.warn( |
|
1223 | 1492 | _(b"warning: not updating public commit %s\n") |
@@ -1241,15 +1510,18 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
1241 | 1510 | newnode = new.commit() |
|
1242 | 1511 | |
|
1243 | 1512 | mapping[old.node()] = [newnode] |
|
1244 | # Update diff property | |
|
1245 | # If it fails just warn and keep going, otherwise the DREV | |
|
1246 | # associations will be lost | |
|
1247 | try: | |
|
1248 | writediffproperties(unfi[newnode], diffmap[old.node()]) | |
|
1249 | except util.urlerr.urlerror: | |
|
1250 | ui.warnnoi18n( | 

1251 | b'Failed to update metadata for D%d\n' % drevid | |
|
1513 | ||
|
1514 | if fold: | |
|
1515 | # Defer updating the (single) Diff until all nodes are | |
|
1516 | # collected. No tags were created, so none need to be | |
|
1517 | # removed. | |
|
1518 | newnodes.append(newnode) | |
|
1519 | continue | |
|
1520 | ||
|
1521 | _amend_diff_properties( | |
|
1522 | unfi, drevid, [newnode], diffmap[old.node()] | |
|
1252 | 1523 | ) |

1524 | ||
|
1253 | 1525 | # Remove local tags since it's no longer necessary |
|
1254 | 1526 | tagname = b'D%d' % drevid |
|
1255 | 1527 | if tagname in repo.tags(): |
@@ -1262,6 +1534,69 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
1262 | 1534 | date=None, |
|
1263 | 1535 | local=True, |
|
1264 | 1536 | ) |
|
1537 | elif fold: | |
|
1538 | # When folding multiple commits into one review with | |
|
1539 | # --fold, track even the commits that weren't amended, so | |
|
1540 | # that their association isn't lost if the properties are | |
|
1541 | # rewritten below. | |
|
1542 | newnodes.append(old.node()) | |
|
1543 | ||
|
1544 | # If the submitted commits are public, no amend takes place so | |
|
1545 | # there are no newnodes and therefore no diff update to do. | |
|
1546 | if fold and newnodes: | |
|
1547 | diff = diffmap[old.node()] | |
|
1548 | ||
|
1549 | # The diff object in diffmap doesn't have the local commits | |
|
1550 | # because that could be returned from differential.creatediff, | |
|
1551 | # not differential.querydiffs. So use the queried diff (if | |
|
1552 | # present), or force the amend (a new revision is being posted.) | |
|
1553 | if not olddiff or set(newnodes) != getlocalcommits(olddiff): | |
|
1554 | _debug(ui, b"updating local commit list for D%d\n" % drevid) | |
|
1555 | _amend_diff_properties(unfi, drevid, newnodes, diff) | |
|
1556 | else: | |
|
1557 | _debug( | |
|
1558 | ui, | |
|
1559 | b"local commit list for D%d is already up-to-date\n" | |
|
1560 | % drevid, | |
|
1561 | ) | |
|
1562 | elif fold: | |
|
1563 | _debug(ui, b"no newnodes to update\n") | |
|
1564 | ||
|
1565 | # Restack any children of first-time submissions that were orphaned | |
|
1566 | # in the process. The ctx won't report that it is an orphan until | |
|
1567 | # the cleanup takes place below. | |
|
1568 | for old in restack: | |
|
1569 | parents = [ | |
|
1570 | mapping.get(old.p1().node(), (old.p1(),))[0], | |
|
1571 | mapping.get(old.p2().node(), (old.p2(),))[0], | |
|
1572 | ] | |
|
1573 | new = context.metadataonlyctx( | |
|
1574 | repo, | |
|
1575 | old, | |
|
1576 | parents=parents, | |
|
1577 | text=old.description(), | |
|
1578 | user=old.user(), | |
|
1579 | date=old.date(), | |
|
1580 | extra=old.extra(), | |
|
1581 | ) | |
|
1582 | ||
|
1583 | newnode = new.commit() | |
|
1584 | ||
|
1585 | # Don't obsolete unselected descendants of nodes that have not | |
|
1586 | # been changed in this transaction- that results in an error. | |
|
1587 | if newnode != old.node(): | |
|
1588 | mapping[old.node()] = [newnode] | |
|
1589 | _debug( | |
|
1590 | ui, | |
|
1591 | b"restabilizing %s as %s\n" | |
|
1592 | % (short(old.node()), short(newnode)), | |
|
1593 | ) | |
|
1594 | else: | |
|
1595 | _debug( | |
|
1596 | ui, | |
|
1597 | b"not restabilizing unchanged %s\n" % short(old.node()), | |
|
1598 | ) | |
|
1599 | ||
|
1265 | 1600 | scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True) |
|
1266 | 1601 | if wnode in mapping: |
|
1267 | 1602 | unfi.setparents(mapping[wnode][0]) |
@@ -1398,7 +1733,7 b' def _prefetchdrevs(tree):' | |||
|
1398 | 1733 | return drevs, ancestordrevs |
|
1399 | 1734 | |
|
1400 | 1735 | |
|
1401 | def querydrev(repo, spec): | 

1736 | def querydrev(ui, spec): | |
|
1402 | 1737 | """return a list of "Differential Revision" dicts |
|
1403 | 1738 | |
|
1404 | 1739 | spec is a string using a simple query language, see docstring in phabread |
@@ -1407,46 +1742,49 b' def querydrev(repo, spec):' | |||
|
1407 | 1742 | A "Differential Revision dict" looks like: |
|
1408 | 1743 | |
|
1409 | 1744 | { |
|
1410 | "id": "2", | |
|
1411 | "phid": "PHID-DREV-672qvysjcczopag46qty", | |
|
1412 | "title": "example", | 

1413 | "uri": "https://phab.example.com/D2", | |
|
1745 | "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72", | |
|
1746 | "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye", | |
|
1747 | "auxiliary": { | |
|
1748 | "phabricator:depends-on": [ | |
|
1749 | "PHID-DREV-gbapp366kutjebt7agcd" | |
|
1750 | ] | |
|
1751 | "phabricator:projects": [], | |
|
1752 | }, | |
|
1753 | "branch": "default", | |
|
1754 | "ccs": [], | |
|
1755 | "commits": [], | |
|
1414 | 1756 | "dateCreated": "1499181406", |
|
1415 | 1757 | "dateModified": "1499182103", |
|
1416 | "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye", | |
|
1417 | "status": "0", | |
|
1418 | "statusName": "Needs Review", | |
|
1419 | "properties": [], | |
|
1420 | "branch": null, | |
|
1421 | "summary": "", | |
|
1422 | "testPlan": "", | |
|
1423 | "lineCount": "2", | |
|
1424 | "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72", | |
|
1425 | 1758 | "diffs": [ |
|
1426 | 1759 | "3", |
|
1427 | 1760 | "4", |
|
1428 | 1761 | ], |
|
1429 | "commits": [], | 

1762 | "hashes": [], | |
|
1763 | "id": "2", | |
|
1764 | "lineCount": "2", | |
|
1765 | "phid": "PHID-DREV-672qvysjcczopag46qty", | |
|
1766 | "properties": {}, | |
|
1767 | "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv", | |
|
1430 | 1768 | "reviewers": [], |
|
1431 | "ccs": [], | |
|
1432 | "hashes": [], | |
|
1433 | "auxiliary": { | |
|
1434 | "phabricator:projects": [], | |
|
1435 | "phabricator:depends-on": [ | |
|
1436 | "PHID-DREV-gbapp366kutjebt7agcd" | |
|
1437 | ] | |
|
1438 | }, | |
|
1439 | "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv", | |
|
1440 | 1769 | "sourcePath": null |
|
1770 | "status": "0", | |
|
1771 | "statusName": "Needs Review", | |
|
1772 | "summary": "", | |
|
1773 | "testPlan": "", | |
|
1774 | "title": "example", | |
|
1775 | "uri": "https://phab.example.com/D2", | |
|
1441 | 1776 | } |
|
1442 | 1777 | """ |
|
1778 | # TODO: replace differential.query and differential.querydiffs with | |
|
1779 | # differential.diff.search because the former (and their output) are | |
|
1780 | # frozen, and planned to be deprecated and removed. | |
|
1443 | 1781 | |
|
1444 | 1782 | def fetch(params): |
|
1445 | 1783 | """params -> single drev or None""" |
|
1446 | 1784 | key = (params.get(b'ids') or params.get(b'phids') or [None])[0] |
|
1447 | 1785 | if key in prefetched: |
|
1448 | 1786 | return prefetched[key] |
|
1449 | drevs = callconduit(repo.ui, b'differential.query', params) | 

1787 | drevs = callconduit(ui, b'differential.query', params) | |
|
1450 | 1788 | # Fill prefetched with the result |
|
1451 | 1789 | for drev in drevs: |
|
1452 | 1790 | prefetched[drev[b'phid']] = drev |
@@ -1483,7 +1821,7 b' def querydrev(repo, spec):' | |||
|
1483 | 1821 | drevs, ancestordrevs = _prefetchdrevs(tree) |
|
1484 | 1822 | |
|
1485 | 1823 | # developer config: phabricator.batchsize |
|
1486 | batchsize = repo.ui.configint(b'phabricator', b'batchsize') | 

1824 | batchsize = ui.configint(b'phabricator', b'batchsize') | |
|
1487 | 1825 | |
|
1488 | 1826 | # Prefetch Differential Revisions in batch |
|
1489 | 1827 | tofetch = set(drevs) |
@@ -1537,6 +1875,48 b' def getdescfromdrev(drev):' | |||
|
1537 | 1875 | return b'\n\n'.join(filter(None, [title, summary, testplan, uri])) |
|
1538 | 1876 | |
|
1539 | 1877 | |
|
1878 | def get_amended_desc(drev, ctx, folded): | |
|
1879 | """similar to ``getdescfromdrev``, but supports a folded series of commits | |
|
1880 | ||
|
1881 | This is used when determining if an individual commit needs to have its | |
|
1882 | message amended after posting it for review. The determination is made for | |
|
1883 | each individual commit, even when they were folded into one review. | |
|
1884 | """ | |
|
1885 | if not folded: | |
|
1886 | return getdescfromdrev(drev) | |
|
1887 | ||
|
1888 | uri = b'Differential Revision: %s' % drev[b'uri'] | |
|
1889 | ||
|
1890 | # Since the commit messages were combined when posting multiple commits | |
|
1891 | # with --fold, the fields can't be read from Phabricator here, or *all* | |
|
1892 | # affected local revisions will end up with the same commit message after | |
|
1893 | # the URI is amended in. Append in the DREV line, or update it if it | |
|
1894 | # exists. At worst, this means commit message or test plan updates on | |
|
1895 | # Phabricator aren't propagated back to the repository, but that seems | |
|
1896 | # reasonable for the case where local commits are effectively combined | |
|
1897 | # in Phabricator. | |
|
1898 | m = _differentialrevisiondescre.search(ctx.description()) | |
|
1899 | if not m: | |
|
1900 | return b'\n\n'.join([ctx.description(), uri]) | |
|
1901 | ||
|
1902 | return _differentialrevisiondescre.sub(uri, ctx.description()) | |
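
A sketch of the append-or-rewrite behavior above, with a simplified
regular expression standing in for _differentialrevisiondescre (the
stand-in pattern and function name are assumptions for illustration):

    import re

    _drev_re = re.compile(r'Differential Revision:\s*(?P<url>.+)$', re.M)

    def amend_folded_desc(desc, uri):
        """Append the tag line, or rewrite an existing one in place,
        leaving the rest of the local message untouched."""
        line = 'Differential Revision: %s' % uri
        if not _drev_re.search(desc):
            return '\n\n'.join([desc, line])
        return _drev_re.sub(line, desc)

    assert amend_folded_desc('commit 2', 'https://phab.example.com/D123') == (
        'commit 2\n\nDifferential Revision: https://phab.example.com/D123')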
|
1903 | ||
|
1904 | ||
|
1905 | def getlocalcommits(diff): | |
|
1906 | """get the set of local commits from a diff object | |
|
1907 | ||
|
1908 | See ``getdiffmeta()`` for an example diff object. | |
|
1909 | """ | |
|
1910 | props = diff.get(b'properties') or {} | |
|
1911 | commits = props.get(b'local:commits') or {} | |
|
1912 | if len(commits) > 1: | |
|
1913 | return {bin(c) for c in commits.keys()} | |
|
1914 | ||
|
1915 | # Storing the diff metadata predates storing `local:commits`, so continue | |
|
1916 | # to use that in the --no-fold case. | |
|
1917 | return {bin(getdiffmeta(diff).get(b'node', b'')) or None} | |
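
The same lookup can be sketched over a plain dict, keeping the fallback
to the single hg:meta node when fewer than two local:commits entries
exist (types simplified; the real code converts hex to binary with bin()):

    def get_local_commits(diff):
        props = diff.get('properties') or {}
        commits = props.get('local:commits') or {}
        if len(commits) > 1:
            return set(commits.keys())
        # Pre-fold diffs only recorded a single node in hg:meta.
        return {props.get('hg:meta', {}).get('node')}

    diff = {'properties': {'local:commits': {'aa' * 20: {}, 'bb' * 20: {}}}}
    assert get_local_commits(diff) == {'aa' * 20, 'bb' * 20}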
|
1918 | ||
|
1919 | ||
|
1540 | 1920 | def getdiffmeta(diff): |
|
1541 | 1921 | """get commit metadata (date, node, user, p1) from a diff object |
|
1542 | 1922 | |
@@ -1544,6 +1924,7 b' def getdiffmeta(diff):' | |||
|
1544 | 1924 | |
|
1545 | 1925 | "properties": { |
|
1546 | 1926 | "hg:meta": { |
|
1927 | "branch": "default", | |
|
1547 | 1928 | "date": "1499571514 25200", |
|
1548 | 1929 | "node": "98c08acae292b2faf60a279b4189beb6cff1414d", |
|
1549 | 1930 | "user": "Foo Bar <foo@example.com>", |
@@ -1557,16 +1938,16 b' def getdiffmeta(diff):' | |||
|
1557 | 1938 | "local:commits": { |
|
1558 | 1939 | "98c08acae292b2faf60a279b4189beb6cff1414d": { |
|
1559 | 1940 | "author": "Foo Bar", |
|
1560 | "time": 1499546314, | |
|
1941 | "authorEmail": "foo@example.com" | |
|
1561 | 1942 | "branch": "default", |
|
1562 | "tag": "", | |
|
1563 | 1943 | "commit": "98c08acae292b2faf60a279b4189beb6cff1414d", |
|
1944 | "local": "1000", | |
|
1945 | "message": "...", | |
|
1946 | "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"], | |
|
1564 | 1947 | "rev": "98c08acae292b2faf60a279b4189beb6cff1414d", |
|
1565 | "local": "1000", | |
|
1566 | "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"], | |
|
1567 | 1948 | "summary": "...", |
|
1568 | "message": "...", | 

1569 | "authorEmail": "foo@example.com" | |
|
1949 | "tag": "", | |
|
1950 | "time": 1499546314, | |
|
1570 | 1951 | } |
|
1571 | 1952 | } |
|
1572 | 1953 | } |
@@ -1605,24 +1986,47 b' def getdiffmeta(diff):' | |||
|
1605 | 1986 | return meta |
|
1606 | 1987 | |
|
1607 | 1988 | |
|
1608 | def readpatch(repo, drevs, write): | |
|
1989 | def _getdrevs(ui, stack, specs): | |
|
1990 | """convert user supplied DREVSPECs into "Differential Revision" dicts | |
|
1991 | ||
|
1992 | See ``hg help phabread`` for how to specify each DREVSPEC. | |
|
1993 | """ | |
|
1994 | if len(specs) > 0: | |
|
1995 | ||
|
1996 | def _formatspec(s): | |
|
1997 | if stack: | |
|
1998 | s = b':(%s)' % s | |
|
1999 | return b'(%s)' % s | |
|
2000 | ||
|
2001 | spec = b'+'.join(pycompat.maplist(_formatspec, specs)) | |
|
2002 | ||
|
2003 | drevs = querydrev(ui, spec) | |
|
2004 | if drevs: | |
|
2005 | return drevs | |
|
2006 | ||
|
2007 | raise error.Abort(_(b"empty DREVSPEC set")) | |
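
The spec assembly above is easy to see with plain strings: each
user-supplied DREVSPEC is parenthesized (prefixed with ':' when --stack
asks for ancestors too), then the pieces are unioned with '+':

    def format_specs(specs, stack=False):
        def _formatspec(s):
            if stack:
                s = ':(%s)' % s
            return '(%s)' % s
        return '+'.join(_formatspec(s) for s in specs)

    assert format_specs(['D123', 'D45']) == '(D123)+(D45)'
    assert format_specs(['D123'], stack=True) == '(:(D123))'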
|
2008 | ||
|
2009 | ||
|
2010 | def readpatch(ui, drevs, write): | |
|
1609 | 2011 | """generate plain-text patch readable by 'hg import' |
|
1610 | 2012 | |
|
1611 | write is usually ui.write. drevs is what "querydrev" returns, results of | |
|
2013 | write takes a list of (DREV, bytes), where DREV is the differential number | |
|
2014 | (as bytes, without the "D" prefix) and the bytes are the text of a patch | |
|
2015 | to be imported. drevs is what "querydrev" returns, results of | |
|
1612 | 2016 | "differential.query". |
|
1613 | 2017 | """ |
|
1614 | 2018 | # Prefetch hg:meta property for all diffs |
|
1615 | diffids = sorted(set(max(int(v) for v in drev[b'diffs']) for drev in drevs)) | 

1616 | diffs = callconduit(repo.ui, b'differential.querydiffs', {b'ids': diffids}) | 

2019 | diffids = sorted({max(int(v) for v in drev[b'diffs']) for drev in drevs}) | |
|
2020 | diffs = callconduit(ui, b'differential.querydiffs', {b'ids': diffids}) | |
|
2021 | ||
|
2022 | patches = [] | |
|
1617 | 2023 | |
|
1618 | 2024 | # Generate patch for each drev |
|
1619 | 2025 | for drev in drevs: |
|
1620 | repo.ui.note(_(b'reading D%s\n') % drev[b'id']) | 

2026 | ui.note(_(b'reading D%s\n') % drev[b'id']) | |
|
1621 | 2027 | |
|
1622 | 2028 | diffid = max(int(v) for v in drev[b'diffs']) |
|
1623 | body = callconduit( | |
|
1624 | repo.ui, b'differential.getrawdiff', {b'diffID': diffid} | |
|
1625 | ) | |
|
2029 | body = callconduit(ui, b'differential.getrawdiff', {b'diffID': diffid}) | |
|
1626 | 2030 | desc = getdescfromdrev(drev) |
|
1627 | 2031 | header = b'# HG changeset patch\n' |
|
1628 | 2032 | |
@@ -1635,22 +2039,28 b' def readpatch(repo, drevs, write):' | |||
|
1635 | 2039 | header += b'# %s %s\n' % (_metanamemap[k], meta[k]) |
|
1636 | 2040 | |
|
1637 | 2041 | content = b'%s%s\n%s' % (header, desc, body) |
|
1638 | write(content) | |
|
2042 | patches.append((drev[b'id'], content)) | |
|
2043 | ||
|
2044 | # Write patches to the supplied callback | |
|
2045 | write(patches) | |
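
Under the new contract a write callback receives the whole list at once
rather than one blob per call. A minimal illustrative consumer (this
function is a hypothetical example, not part of the extension):

    def _collect(patches):
        """patches is [(drev, content), ...] as produced above."""
        for drev, content in patches:
            print('D%s: %d bytes of patch text' % (drev, len(content)))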
|
1639 | 2046 | |
|
1640 | 2047 | |
|
1641 | 2048 | @vcrcommand( |
|
1642 | 2049 | b'phabread', |
|
1643 | 2050 | [(b'', b'stack', False, _(b'read dependencies'))], |
|
1644 | _(b'DREVSPEC [OPTIONS]'), | |
|
2051 | _(b'DREVSPEC... [OPTIONS]'), | |
|
1645 | 2052 | helpcategory=command.CATEGORY_IMPORT_EXPORT, |
|
2053 | optionalrepo=True, | |
|
1646 | 2054 | ) |
|
1647 | def phabread(ui, repo, spec, **opts): | |
|
2055 | def phabread(ui, repo, *specs, **opts): | |
|
1648 | 2056 | """print patches from Phabricator suitable for importing |
|
1649 | 2057 | |
|
1650 | 2058 | DREVSPEC could be a Differential Revision identity, like ``D123``, or just |
|
1651 | 2059 | the number ``123``. It could also have common operators like ``+``, ``-``, |
|
1652 | 2060 | ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to |
|
1653 | select a stack. | |
|
2061 | select a stack. If multiple DREVSPEC values are given, the result is the | |
|
2062 | union of each individually evaluated value. No attempt is currently made | |
|
2063 | to reorder the values to run from parent to child. | |
|
1654 | 2064 | |
|
1655 | 2065 | ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision`` |
|
1656 | 2066 | could be used to filter patches by status. For performance reason, they |
@@ -1664,10 +2074,74 b' def phabread(ui, repo, spec, **opts):' | |||
|
1664 | 2074 | It is equivalent to the ``:`` operator. |
|
1665 | 2075 | """ |
|
1666 | 2076 | opts = pycompat.byteskwargs(opts) |
|
1667 | if opts.get(b'stack'): | 

1668 | spec = b':(%s)' % spec | |
|
1669 | drevs = querydrev(repo, spec) | |
|
1670 | readpatch(repo, drevs, ui.write) | |
|
2077 | drevs = _getdrevs(ui, opts.get(b'stack'), specs) | |
|
2078 | ||
|
2079 | def _write(patches): | |
|
2080 | for drev, content in patches: | |
|
2081 | ui.write(content) | |
|
2082 | ||
|
2083 | readpatch(ui, drevs, _write) | |
|
2084 | ||
|
2085 | ||
|
2086 | @vcrcommand( | |
|
2087 | b'phabimport', | |
|
2088 | [(b'', b'stack', False, _(b'import dependencies as well'))], | |
|
2089 | _(b'DREVSPEC... [OPTIONS]'), | |
|
2090 | helpcategory=command.CATEGORY_IMPORT_EXPORT, | |
|
2091 | ) | |
|
2092 | def phabimport(ui, repo, *specs, **opts): | |
|
2093 | """import patches from Phabricator for the specified Differential Revisions | |
|
2094 | ||
|
2095 | The patches are read and applied starting at the parent of the working | |
|
2096 | directory. | |
|
2097 | ||
|
2098 | See ``hg help phabread`` for how to specify DREVSPEC. | |
|
2099 | """ | |
|
2100 | opts = pycompat.byteskwargs(opts) | |
|
2101 | ||
|
2102 | # --bypass avoids losing exec and symlink bits when importing on Windows, | |
|
2103 | # and allows importing with a dirty wdir. It also aborts instead of leaving | |
|
2104 | # rejects. | |
|
2105 | opts[b'bypass'] = True | |
|
2106 | ||
|
2107 | # Mandatory default values, synced with commands.import | |
|
2108 | opts[b'strip'] = 1 | |
|
2109 | opts[b'prefix'] = b'' | |
|
2110 | # Evolve 9.3.0 assumes this key is present in cmdutil.tryimportone() | |
|
2111 | opts[b'obsolete'] = False | |
|
2112 | ||
|
2113 | if ui.configbool(b'phabimport', b'secret'): | |
|
2114 | opts[b'secret'] = True | |
|
2115 | if ui.configbool(b'phabimport', b'obsolete'): | |
|
2116 | opts[b'obsolete'] = True # Handled by evolve wrapping tryimportone() | |
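
The two toggles consulted above live in a [phabimport] config section;
in hgrc form (option names exactly as read by the ui.configbool calls,
both assumed to default to off):

    [phabimport]
    # commit the imported revisions in the secret phase
    secret = true
    # let the evolve extension handle obsolescence when importing
    obsolete = true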
|
2117 | ||
|
2118 | def _write(patches): | |
|
2119 | parents = repo[None].parents() | |
|
2120 | ||
|
2121 | with repo.wlock(), repo.lock(), repo.transaction(b'phabimport'): | |
|
2122 | for drev, contents in patches: | |
|
2123 | ui.status(_(b'applying patch from D%s\n') % drev) | |
|
2124 | ||
|
2125 | with patch.extract(ui, pycompat.bytesio(contents)) as patchdata: | |
|
2126 | msg, node, rej = cmdutil.tryimportone( | |
|
2127 | ui, | |
|
2128 | repo, | |
|
2129 | patchdata, | |
|
2130 | parents, | |
|
2131 | opts, | |
|
2132 | [], | |
|
2133 | None, # Never update wdir to another revision | |
|
2134 | ) | |
|
2135 | ||
|
2136 | if not node: | |
|
2137 | raise error.Abort(_(b'D%s: no diffs found') % drev) | |
|
2138 | ||
|
2139 | ui.note(msg + b'\n') | |
|
2140 | parents = [repo[node]] | |
|
2141 | ||
|
2142 | drevs = _getdrevs(ui, opts.get(b'stack'), specs) | |
|
2143 | ||
|
2144 | readpatch(repo.ui, drevs, _write) | |
|
1671 | 2145 | |
|
1672 | 2146 | |
|
1673 | 2147 | @vcrcommand( |
@@ -1679,10 +2153,11 b' def phabread(ui, repo, spec, **opts):' | |||
|
1679 | 2153 | (b'', b'reclaim', False, _(b'reclaim revisions')), |
|
1680 | 2154 | (b'm', b'comment', b'', _(b'comment on the last revision')), |
|
1681 | 2155 | ], |
|
1682 | _(b'DREVSPEC [OPTIONS]'), | |
|
2156 | _(b'DREVSPEC... [OPTIONS]'), | |
|
1683 | 2157 | helpcategory=command.CATEGORY_IMPORT_EXPORT, |
|
2158 | optionalrepo=True, | |
|
1684 | 2159 | ) |
|
1685 | def phabupdate(ui, repo, spec, **opts): | |
|
2160 | def phabupdate(ui, repo, *specs, **opts): | |
|
1686 | 2161 | """update Differential Revision in batch |
|
1687 | 2162 | |
|
1688 | 2163 | DREVSPEC selects revisions. See :hg:`help phabread` for its usage. |
@@ -1696,7 +2171,7 b' def phabupdate(ui, repo, spec, **opts):' | |||
|
1696 | 2171 | for f in flags: |
|
1697 | 2172 | actions.append({b'type': f, b'value': True}) |
|
1698 | 2173 | |
|
1699 | drevs = querydrev(repo, spec) | 

2174 | drevs = _getdrevs(ui, opts.get(b'stack'), specs) | |
|
1700 | 2175 | for i, drev in enumerate(drevs): |
|
1701 | 2176 | if i + 1 == len(drevs) and opts.get(b'comment'): |
|
1702 | 2177 | actions.append({b'type': b'comment', b'value': opts[b'comment']}) |
@@ -1759,11 +2234,11 b' def phabstatusshowview(ui, repo, display' | |||
|
1759 | 2234 | """Phabricator differential status""" |
|
1760 | 2235 | revs = repo.revs('sort(_underway(), topo)') |
|
1761 | 2236 | drevmap = getdrevmap(repo, revs) |
|
1762 | unknownrevs, drevids, revsbydrevid = [], set([]), {} | 

2237 | unknownrevs, drevids, revsbydrevid = [], set(), {} | |
|
1763 | 2238 | for rev, drevid in pycompat.iteritems(drevmap): |
|
1764 | 2239 | if drevid is not None: |
|
1765 | 2240 | drevids.add(drevid) |
|
1766 | revsbydrevid.setdefault(drevid, set([])).add(rev) | 

2241 | revsbydrevid.setdefault(drevid, set()).add(rev) | |
|
1767 | 2242 | else: |
|
1768 | 2243 | unknownrevs.append(rev) |
|
1769 | 2244 |
@@ -48,6 +48,7 b" testedwith = b'ships-with-hg-core'" | |||
|
48 | 48 | [ |
|
49 | 49 | (b'a', b'abort-on-err', None, _(b'abort if an error occurs')), |
|
50 | 50 | (b'', b'all', None, _(b'purge ignored files too')), |
|
51 | (b'i', b'ignored', None, _(b'purge only ignored files')), | |
|
51 | 52 | (b'', b'dirs', None, _(b'purge empty directories')), |
|
52 | 53 | (b'', b'files', None, _(b'purge files')), |
|
53 | 54 | (b'p', b'print', None, _(b'print filenames instead of deleting them')), |
@@ -80,7 +81,7 b' def purge(ui, repo, *dirs, **opts):' | |||
|
80 | 81 | But it will leave untouched: |
|
81 | 82 | |
|
82 | 83 | - Modified and unmodified tracked files |
|
83 | - Ignored files (unless --all is specified) | |
|
84 | - Ignored files (unless -i or --all is specified) | |
|
84 | 85 | - New files added to the repository (with :hg:`add`) |
|
85 | 86 | |
|
86 | 87 | The --files and --dirs options can be used to direct purge to delete |
@@ -96,12 +97,19 b' def purge(ui, repo, *dirs, **opts):' | |||
|
96 | 97 | option. |
|
97 | 98 | ''' |
|
98 | 99 | opts = pycompat.byteskwargs(opts) |
|
100 | cmdutil.check_at_most_one_arg(opts, b'all', b'ignored') | |
|
99 | 101 | |
|
100 | 102 | act = not opts.get(b'print') |
|
101 | 103 | eol = b'\n' |
|
102 | 104 | if opts.get(b'print0'): |
|
103 | 105 | eol = b'\0' |
|
104 | 106 | act = False # --print0 implies --print |
|
107 | if opts.get(b'all', False): | |
|
108 | ignored = True | |
|
109 | unknown = True | |
|
110 | else: | |
|
111 | ignored = opts.get(b'ignored', False) | |
|
112 | unknown = not ignored | |
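
The option mapping above reduces to a small truth table; a sketch with
hypothetical names (--all wins because the two flags were already
checked to be mutually exclusive above):

    def purge_targets(select_all=False, only_ignored=False):
        """Return (ignored, unknown): --all purges both, -i/--ignored
        purges only ignored files, and the default purges only unknown
        (untracked and not ignored) files."""
        if select_all:
            return True, True
        return only_ignored, not only_ignored

    assert purge_targets() == (False, True)
    assert purge_targets(only_ignored=True) == (True, False)
    assert purge_targets(select_all=True) == (True, True)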
|
105 | 113 | |
|
106 | 114 | removefiles = opts.get(b'files') |
|
107 | 115 | removedirs = opts.get(b'dirs') |
@@ -115,7 +123,8 b' def purge(ui, repo, *dirs, **opts):' | |||
|
115 | 123 | paths = mergemod.purge( |
|
116 | 124 | repo, |
|
117 | 125 | match, |
|
118 | ignored=opts.get(b'all', False), | |
|
126 | unknown=unknown, | |
|
127 | ignored=ignored, | |
|
119 | 128 | removeemptydirs=removedirs, |
|
120 | 129 | removefiles=removefiles, |
|
121 | 130 | abortonerror=opts.get(b'abort_on_err'), |
@@ -37,6 +37,7 b' from mercurial import (' | |||
|
37 | 37 | hg, |
|
38 | 38 | merge as mergemod, |
|
39 | 39 | mergeutil, |
|
40 | node as nodemod, | |
|
40 | 41 | obsolete, |
|
41 | 42 | obsutil, |
|
42 | 43 | patch, |
@@ -177,6 +178,7 b' class rebaseruntime(object):' | |||
|
177 | 178 | # --continue or --abort)), the original repo should be used so |
|
178 | 179 | # visibility-dependent revsets are correct. |
|
179 | 180 | self.prepared = False |
|
181 | self.resume = False | |
|
180 | 182 | self._repo = repo |
|
181 | 183 | |
|
182 | 184 | self.ui = ui |
@@ -366,6 +368,7 b' class rebaseruntime(object):' | |||
|
366 | 368 | _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset) |
|
367 | 369 | |
|
368 | 370 | def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False): |
|
371 | self.resume = True | |
|
369 | 372 | try: |
|
370 | 373 | self.restorestatus() |
|
371 | 374 | self.collapsemsg = restorecollapsemsg(self.repo, isabort) |
@@ -503,7 +506,7 b' class rebaseruntime(object):' | |||
|
503 | 506 | p.complete() |
|
504 | 507 | ui.note(_(b'rebase merging completed\n')) |
|
505 | 508 | |
|
506 | def _concludenode(self, rev, p1, p2, editor, commitmsg=None): | 

509 | def _concludenode(self, rev, p1, editor, commitmsg=None): | |
|
507 | 510 | '''Commit the wd changes with parents p1 and p2. |
|
508 | 511 | |
|
509 | 512 | Reuse commit info from rev but also store useful information in extra. |
@@ -527,8 +530,6 b' class rebaseruntime(object):' | |||
|
527 | 530 | if self.inmemory: |
|
528 | 531 | newnode = commitmemorynode( |
|
529 | 532 | repo, |
|
530 | p1, | |
|
531 | p2, | |
|
532 | 533 | wctx=self.wctx, |
|
533 | 534 | extra=extra, |
|
534 | 535 | commitmsg=commitmsg, |
@@ -540,8 +541,6 b' class rebaseruntime(object):' | |||
|
540 | 541 | else: |
|
541 | 542 | newnode = commitnode( |
|
542 | 543 | repo, |
|
543 | p1, | |
|
544 | p2, | |
|
545 | 544 | extra=extra, |
|
546 | 545 | commitmsg=commitmsg, |
|
547 | 546 | editor=editor, |
@@ -549,11 +548,6 b' class rebaseruntime(object):' | |||
|
549 | 548 | date=date, |
|
550 | 549 | ) |
|
551 | 550 | |
|
552 | if newnode is None: | |
|
553 | # If it ended up being a no-op commit, then the normal | |
|
554 | # merge state clean-up path doesn't happen, so do it | |
|
555 | # here. Fix issue5494 | |
|
556 | mergemod.mergestate.clean(repo) | |
|
557 | 551 | return newnode |
|
558 | 552 | |
|
559 | 553 | def _rebasenode(self, tr, rev, allowdivergence, progressfn): |
@@ -605,8 +599,9 b' class rebaseruntime(object):' | |||
|
605 | 599 | self.skipped, |
|
606 | 600 | self.obsoletenotrebased, |
|
607 | 601 | ) |
|
608 | if not self.inmemory and len(repo[None].parents()) == 2: | |
|
602 | if self.resume and self.wctx.p1().rev() == p1: | |
|
609 | 603 | repo.ui.debug(b'resuming interrupted rebase\n') |
|
604 | self.resume = False | |
|
610 | 605 | else: |
|
611 | 606 | overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')} |
|
612 | 607 | with ui.configoverride(overrides, b'rebase'): |
@@ -614,6 +609,7 b' class rebaseruntime(object):' | |||
|
614 | 609 | repo, |
|
615 | 610 | rev, |
|
616 | 611 | p1, |
|
612 | p2, | |
|
617 | 613 | base, |
|
618 | 614 | self.collapsef, |
|
619 | 615 | dest, |
@@ -635,13 +631,15 b' class rebaseruntime(object):' | |||
|
635 | 631 | editor = cmdutil.getcommiteditor( |
|
636 | 632 | editform=editform, **pycompat.strkwargs(opts) |
|
637 | 633 | ) |
|
638 | newnode = self._concludenode(rev, p1, p2, editor) | |
|
634 | # We need to set parents again here just in case we're continuing | |
|
635 | # a rebase started with an old hg version (before 9c9cfecd4600), | |
|
636 | # because those old versions would have left us with two dirstate | |
|
637 | # parents, and we don't want to create a merge commit here (unless | |
|
638 | # we're rebasing a merge commit). | |
|
639 | self.wctx.setparents(repo[p1].node(), repo[p2].node()) | |
|
640 | newnode = self._concludenode(rev, p1, editor) | |
|
639 | 641 | else: |
|
640 | 642 | # Skip commit if we are collapsing |
|
641 | if self.inmemory: | |
|
642 | self.wctx.setbase(repo[p1]) | |
|
643 | else: | |
|
644 | repo.setparents(repo[p1].node()) | |
|
645 | 643 | newnode = None |
|
646 | 644 | # Update the state |
|
647 | 645 | if newnode is not None: |
@@ -696,8 +694,9 b' class rebaseruntime(object):' | |||
|
696 | 694 | editor = cmdutil.getcommiteditor(edit=editopt, editform=editform) |
|
697 | 695 | revtoreuse = max(self.state) |
|
698 | 696 | |
|
697 | self.wctx.setparents(repo[p1].node(), repo[self.external].node()) | |
|
699 | 698 | newnode = self._concludenode( |
|
700 | revtoreuse, p1, self.external, editor, commitmsg=commitmsg | 

699 | revtoreuse, p1, editor, commitmsg=commitmsg | |
|
701 | 700 | ) |
|
702 | 701 | |
|
703 | 702 | if newnode is not None: |
@@ -799,9 +798,7 b' class rebaseruntime(object):' | |||
|
799 | 798 | |
|
800 | 799 | # Update away from the rebase if necessary |
|
801 | 800 | if shouldupdate: |
|
802 | mergemod.update( | |
|
803 | repo, self.originalwd, branchmerge=False, force=True | |
|
804 | ) | |
|
801 | mergemod.clean_update(repo[self.originalwd]) | |
|
805 | 802 | |
|
806 | 803 | # Strip from the first rebased revision |
|
807 | 804 | if rebased: |
@@ -824,14 +821,14 b' class rebaseruntime(object):' | |||
|
824 | 821 | ( |
|
825 | 822 | b's', |
|
826 | 823 | b'source', |
|
827 | b'', | 

828 | _(b'rebase the specified changeset and descendants'), | |
|
824 | [], | |
|
825 | _(b'rebase the specified changesets and their descendants'), | |
|
829 | 826 | _(b'REV'), |
|
830 | 827 | ), |
|
831 | 828 | ( |
|
832 | 829 | b'b', |
|
833 | 830 | b'base', |
|
834 | b'', | 

831 | [], | |
|
835 | 832 | _(b'rebase everything from branching point of specified changeset'), |
|
836 | 833 | _(b'REV'), |
|
837 | 834 | ), |
@@ -880,7 +877,7 b' class rebaseruntime(object):' | |||
|
880 | 877 | + cmdutil.dryrunopts |
|
881 | 878 | + cmdutil.formatteropts |
|
882 | 879 | + cmdutil.confirmopts, |
|
883 | _(b'[-s REV | -b REV] [-d REV] [OPTION]'), | |
|
880 | _(b'[[-s REV]... | [-b REV]... | [-r REV]...] [-d REV] [OPTION]...'), | |
|
884 | 881 | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, |
|
885 | 882 | ) |
|
886 | 883 | def rebase(ui, repo, **opts): |
@@ -1011,10 +1008,10 b' def rebase(ui, repo, **opts):' | |||
|
1011 | 1008 | action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue') |
|
1012 | 1009 | if action: |
|
1013 | 1010 | cmdutil.check_incompatible_arguments( |
|
1014 | opts, action, b'confirm', b'dry_run' | |
|
1011 | opts, action, [b'confirm', b'dry_run'] | |
|
1015 | 1012 | ) |
|
1016 | 1013 | cmdutil.check_incompatible_arguments( |
|
1017 | opts, action, b'rev', b'source', b'base', b'dest' | |
|
1014 | opts, action, [b'rev', b'source', b'base', b'dest'] | |
|
1018 | 1015 | ) |
|
1019 | 1016 | cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run') |
|
1020 | 1017 | cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base') |
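
These call sites assume an updated cmdutil helper whose incompatible
option names arrive as one list instead of *varargs, which is why the
brackets appear above. A sketch of that assumed signature:

    def check_incompatible_arguments(opts, first, others):
        """Abort when 'first' is combined with any option in 'others'
        (plain ValueError here; the real helper raises error.Abort)."""
        for other in others:
            if opts.get(first) and opts.get(other):
                raise ValueError(
                    'cannot specify both --%s and --%s' % (first, other))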
@@ -1028,7 +1025,7 b' def rebase(ui, repo, **opts):' | |||
|
1028 | 1025 | if opts.get(b'auto_orphans'): |
|
1029 | 1026 | disallowed_opts = set(opts) - {b'auto_orphans'} |
|
1030 | 1027 | cmdutil.check_incompatible_arguments( |
|
1031 | opts, b'auto_orphans', *disallowed_opts | 

1028 | opts, b'auto_orphans', disallowed_opts | |
|
1032 | 1029 | ) |
|
1033 | 1030 | |
|
1034 | 1031 | userrevs = list(repo.revs(opts.get(b'auto_orphans'))) |
@@ -1195,8 +1192,8 b' def _origrebase(' | |||
|
1195 | 1192 | repo, |
|
1196 | 1193 | inmemory, |
|
1197 | 1194 | opts.get(b'dest', None), |
|
1198 | opts.get(b'source', None), | 

1199 | opts.get(b'base', None), | 

1195 | opts.get(b'source', []), | |
|
1196 | opts.get(b'base', []), | |
|
1200 | 1197 | opts.get(b'rev', []), |
|
1201 | 1198 | destspace=destspace, |
|
1202 | 1199 | ) |
@@ -1226,16 +1223,7 b' def _origrebase(' | |||
|
1226 | 1223 | rbsrt._finishrebase() |
|
1227 | 1224 | |
|
1228 | 1225 | |
|
1229 | def _definedestmap( | |
|
1230 | ui, | |
|
1231 | repo, | |
|
1232 | inmemory, | |
|
1233 | destf=None, | |
|
1234 | srcf=None, | |
|
1235 | basef=None, | |
|
1236 | revf=None, | |
|
1237 | destspace=None, | |
|
1238 | ): | |
|
1226 | def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace): | |
|
1239 | 1227 | """use revisions argument to define destmap {srcrev: destrev}""" |
|
1240 | 1228 | if revf is None: |
|
1241 | 1229 | revf = [] |
@@ -1261,14 +1249,14 b' def _definedestmap(' | |||
|
1261 | 1249 | ui.status(_(b'empty "rev" revision set - nothing to rebase\n')) |
|
1262 | 1250 | return None |
|
1263 | 1251 | elif srcf: |
|
1264 | src = scmutil.revrange(repo, [srcf]) | 

1252 | src = scmutil.revrange(repo, srcf) | |
|
1265 | 1253 | if not src: |
|
1266 | 1254 | ui.status(_(b'empty "source" revision set - nothing to rebase\n')) |
|
1267 | 1255 | return None |
|
1268 | rebaseset = repo.revs(b'(%ld)::', src) | |
|
1269 | assert rebaseset | |
|
1256 | # `+ (%ld)` to work around `wdir()::` being empty | |
|
1257 | rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src) | |
|
1270 | 1258 | else: |
|
1271 | base = scmutil.revrange(repo, [basef or b'.']) | 

1259 | base = scmutil.revrange(repo, basef or [b'.']) | |
|
1272 | 1260 | if not base: |
|
1273 | 1261 | ui.status( |
|
1274 | 1262 | _(b'empty "base" revision set - ' b"can't compute rebase set\n") |
@@ -1341,6 +1329,8 b' def _definedestmap(' | |||
|
1341 | 1329 | ) |
|
1342 | 1330 | return None |
|
1343 | 1331 | |
|
1332 | if nodemod.wdirrev in rebaseset: | |
|
1333 | raise error.Abort(_(b'cannot rebase the working copy')) | |
|
1344 | 1334 | rebasingwcp = repo[b'.'].rev() in rebaseset |
|
1345 | 1335 | ui.log( |
|
1346 | 1336 | b"rebase", |
@@ -1420,7 +1410,7 b' def externalparent(repo, state, destance' | |||
|
1420 | 1410 | ) |
|
1421 | 1411 | |
|
1422 | 1412 | |
|
1423 | def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg): | 

1413 | def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg): | |
|
1424 | 1414 | '''Commit the memory changes with parents p1 and p2. |
|
1425 | 1415 | Return node of committed revision.''' |
|
1426 | 1416 | # Replicates the empty check in ``repo.commit``. |
@@ -1433,7 +1423,6 b' def commitmemorynode(repo, p1, p2, wctx,' | |||
|
1433 | 1423 | if b'branch' in extra: |
|
1434 | 1424 | branch = extra[b'branch'] |
|
1435 | 1425 | |
|
1436 | wctx.setparents(repo[p1].node(), repo[p2].node()) | |
|
1437 | 1426 | memctx = wctx.tomemctx( |
|
1438 | 1427 | commitmsg, |
|
1439 | 1428 | date=date, |
@@ -1447,15 +1436,13 b' def commitmemorynode(repo, p1, p2, wctx,' | |||
|
1447 | 1436 | return commitres |
|
1448 | 1437 | |
|
1449 | 1438 | |
|
1450 | def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg): | 

1439 | def commitnode(repo, editor, extra, user, date, commitmsg): | |
|
1451 | 1440 | '''Commit the wd changes with parents p1 and p2. |
|
1452 | 1441 | Return node of committed revision.''' |
|
1453 | 1442 | dsguard = util.nullcontextmanager() |
|
1454 | 1443 | if not repo.ui.configbool(b'rebase', b'singletransaction'): |
|
1455 | 1444 | dsguard = dirstateguard.dirstateguard(repo, b'rebase') |
|
1456 | 1445 | with dsguard: |
|
1457 | repo.setparents(repo[p1].node(), repo[p2].node()) | |
|
1458 | ||
|
1459 | 1446 | # Commit might fail if unresolved files exist |
|
1460 | 1447 | newnode = repo.commit( |
|
1461 | 1448 | text=commitmsg, user=user, date=date, extra=extra, editor=editor |
@@ -1465,7 +1452,7 b' def commitnode(repo, p1, p2, editor, ext' | |||
|
1465 | 1452 | return newnode |
|
1466 | 1453 | |
|
1467 | 1454 | |
|
1468 | def rebasenode(repo, rev, p1, base, collapse, dest, wctx): | |
|
1455 | def rebasenode(repo, rev, p1, p2, base, collapse, dest, wctx): | |
|
1469 | 1456 | """Rebase a single revision rev on top of p1 using base as merge ancestor""" |
|
1470 | 1457 | # Merge phase |
|
1471 | 1458 | # Update to destination and merge it with local |
@@ -1475,7 +1462,7 b' def rebasenode(repo, rev, p1, base, coll' | |||
|
1475 | 1462 | else: |
|
1476 | 1463 | if repo[b'.'].rev() != p1: |
|
1477 | 1464 | repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx)) |
|
1478 | mergemod.update(repo, p1, branchmerge=False, force=True) | |
|
1465 | mergemod.clean_update(p1ctx) | |
|
1479 | 1466 | else: |
|
1480 | 1467 | repo.ui.debug(b" already in destination\n") |
|
1481 | 1468 | # This is, alas, necessary to invalidate workingctx's manifest cache, |
@@ -1499,6 +1486,7 b' def rebasenode(repo, rev, p1, base, coll' | |||
|
1499 | 1486 | labels=[b'dest', b'source'], |
|
1500 | 1487 | wc=wctx, |
|
1501 | 1488 | ) |
|
1489 | wctx.setparents(p1ctx.node(), repo[p2].node()) | |
|
1502 | 1490 | if collapse: |
|
1503 | 1491 | copies.graftcopies(wctx, ctx, repo[dest]) |
|
1504 | 1492 | else: |
@@ -1678,22 +1666,6 b' def defineparents(repo, rev, destmap, st' | |||
|
1678 | 1666 | elif p in state and state[p] > 0: |
|
1679 | 1667 | np = state[p] |
|
1680 | 1668 | |
|
1681 | # "bases" only record "special" merge bases that cannot be | |
|
1682 | # calculated from changelog DAG (i.e. isancestor(p, np) is False). | |
|
1683 | # For example: | |
|
1684 | # | |
|
1685 | # B' # rebase -s B -d D, when B was rebased to B'. dest for C | |
|
1686 | # | C # is B', but merge base for C is B, instead of | |
|
1687 | # D | # changelog.ancestor(C, B') == A. If changelog DAG and | |
|
1688 | # | B # "state" edges are merged (so there will be an edge from | |
|
1689 | # |/ # B to B'), the merge base is still ancestor(C, B') in | |
|
1690 | # A # the merged graph. | |
|
1691 | # | |
|
1692 | # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8 | |
|
1693 | # which uses "virtual null merge" to explain this situation. | |
|
1694 | if isancestor(p, np): | |
|
1695 | bases[i] = nullrev | |
|
1696 | ||
|
1697 | 1669 | # If one parent becomes an ancestor of the other, drop the ancestor |
|
1698 | 1670 | for j, x in enumerate(newps[:i]): |
|
1699 | 1671 | if x == nullrev: |
@@ -1739,12 +1711,6 b' def defineparents(repo, rev, destmap, st' | |||
|
1739 | 1711 | if any(p != nullrev and isancestor(rev, p) for p in newps): |
|
1740 | 1712 | raise error.Abort(_(b'source is ancestor of destination')) |
|
1741 | 1713 | |
|
1742 | # "rebasenode" updates to new p1, use the corresponding merge base. | |
|
1743 | if bases[0] != nullrev: | |
|
1744 | base = bases[0] | |
|
1745 | else: | |
|
1746 | base = None | |
|
1747 | ||
|
1748 | 1714 | # Check if the merge will contain unwanted changes. That may happen if |
|
1749 | 1715 | # there are multiple special (non-changelog ancestor) merge bases, which |
|
1750 | 1716 | # cannot be handled well by the 3-way merge algorithm. For example: |
@@ -1760,15 +1726,16 b' def defineparents(repo, rev, destmap, st' | |||
|
1760 | 1726 | # But our merge base candidates (D and E in above case) could still be |
|
1761 | 1727 | # better than the default (ancestor(F, Z) == null). Therefore still |
|
1762 | 1728 | # pick one (so choose p1 above). |
|
1763 | if sum(1 for b in set(bases) if b != nullrev) > 1: | |
|
1729 | if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1: | |
|
1764 | 1730 | unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i] |
|
1765 | 1731 | for i, base in enumerate(bases): |
|
1766 | if base == nullrev: | |
|
1732 | if base == nullrev or base in newps: | |
|
1767 | 1733 | continue |
|
1768 | 1734 | # Revisions in the side (not chosen as merge base) branch that |
|
1769 | 1735 | # might contain "surprising" contents |
|
1736 | other_bases = set(bases) - {base} | |
|
1770 | 1737 | siderevs = list( |
|
1771 | repo.revs(b'((%ld-%d) %% (%d+%d))', bases, base, base, dest) | |
|
1738 | repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest) | |
|
1772 | 1739 | ) |
|
1773 | 1740 | |
|
1774 | 1741 | # If those revisions are covered by rebaseset, the result is good. |
@@ -1786,20 +1753,13 b' def defineparents(repo, rev, destmap, st' | |||
|
1786 | 1753 | ) |
|
1787 | 1754 | ) |
|
1788 | 1755 | |
|
1756 | if any(revs is not None for revs in unwanted): | |
|
1789 | 1757 | # Choose a merge base that has a minimal number of unwanted revs. |
|
1790 | 1758 | l, i = min( |
|
1791 | 1759 | (len(revs), i) |
|
1792 | 1760 | for i, revs in enumerate(unwanted) |
|
1793 | 1761 | if revs is not None |
|
1794 | 1762 | ) |
|
1795 | base = bases[i] | |
|
1796 | ||
|
1797 | # newps[0] should match merge base if possible. Currently, if newps[i] | |
|
1798 | # is nullrev, the only case is newps[i] and newps[j] (j < i), one is | |
|
1799 | # the other's ancestor. In that case, it's fine to not swap newps here. | |
|
1800 | # (see CASE-1 and CASE-2 above) | |
|
1801 | if i != 0 and newps[i] != nullrev: | |
|
1802 | newps[0], newps[i] = newps[i], newps[0] | |
|
1803 | 1763 | |
|
1804 | 1764 | # The merge will include unwanted revisions. Abort now. Revisit this if |
|
1805 | 1765 | # we have a more advanced merge algorithm that handles multiple bases. |
@@ -1816,6 +1776,18 b' def defineparents(repo, rev, destmap, st' | |||
|
1816 | 1776 | % (rev, repo[rev], unwanteddesc) |
|
1817 | 1777 | ) |
|
1818 | 1778 | |
|
1779 | # newps[0] should match merge base if possible. Currently, if newps[i] | |
|
1780 | # is nullrev, the only case is newps[i] and newps[j] (j < i), one is | |
|
1781 | # the other's ancestor. In that case, it's fine to not swap newps here. | |
|
1782 | # (see CASE-1 and CASE-2 above) | |
|
1783 | if i != 0: | |
|
1784 | if newps[i] != nullrev: | |
|
1785 | newps[0], newps[i] = newps[i], newps[0] | |
|
1786 | bases[0], bases[i] = bases[i], bases[0] | |
|
1787 | ||
|
1788 | # "rebasenode" updates to new p1, use the corresponding merge base. | |
|
1789 | base = bases[0] | |
|
1790 | ||
|
1819 | 1791 | repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps)) |
|
1820 | 1792 | |
|
1821 | 1793 | return newps[0], newps[1], base |
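|
Note: the rebase hunks above rework defineparents() so that candidate bases
already present in the new parents are skipped, and the parent whose base
produces the fewest unwanted revisions is swapped into slot 0 together with
that base. A rough standalone sketch of the selection step (toy revision
numbers, not Mercurial's API):

    nullrev = -1

    def pick_base(newps, bases, unwanted):
        # unwanted[i] is None for candidates that were never evaluated
        candidates = [
            (len(revs), i) for i, revs in enumerate(unwanted) if revs is not None
        ]
        if candidates:
            _, i = min(candidates)
            if i != 0 and newps[i] != nullrev:
                newps[0], newps[i] = newps[i], newps[0]
                bases[0], bases[i] = bases[i], bases[0]
        # rebasenode() updates to the new p1, so use the matching base
        return newps, bases, bases[0]

    # the second candidate has fewer unwanted revs, so it moves to slot 0:
    assert pick_base([7, 9], [3, 5], [[2, 4], [6]]) == ([9, 7], [5, 3], 5)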
@@ -1962,7 +1934,7 b' def buildstate(repo, destmap, collapse):' | |||
|
1962 | 1934 | # applied patch. But it prevents messing up the working directory when |
|
1963 | 1935 | # a partially completed rebase is blocked by mq. |
|
1964 | 1936 | if b'qtip' in repo.tags(): |
|
1965 | mqapplied = set(repo[s.node].rev() for s in repo.mq.applied) | |
|
1937 | mqapplied = {repo[s.node].rev() for s in repo.mq.applied} | |
|
1966 | 1938 | if set(destmap.values()) & mqapplied: |
|
1967 | 1939 | raise error.Abort(_(b'cannot rebase onto an applied mq patch')) |
|
1968 | 1940 | |
@@ -2147,7 +2119,7 b' def pullrebase(orig, ui, repo, *args, **' | |||
|
2147 | 2119 | |
|
2148 | 2120 | def _filterobsoleterevs(repo, revs): |
|
2149 | 2121 | """returns a set of the obsolete revisions in revs""" |
|
2150 | return set(r for r in revs if repo[r].obsolete()) | |
|
2122 | return {r for r in revs if repo[r].obsolete()} | |
|
2151 | 2123 | |
|
2152 | 2124 | |
|
2153 | 2125 | def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap): |
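|
Note: this hunk and many of the following ones (releasenotes, remotefilelog,
schemes, sparse, strip, transplant, uncommit, i18n, core) are one mechanical
cleanup: set()/dict() calls wrapping a generator expression become set and
dict comprehension literals. A minimal before/after illustration with
hypothetical data:

    revs = [0, 2, 2, 5]
    old_style = set(r for r in revs if r)   # generator passed to set()
    new_style = {r for r in revs if r}      # set comprehension
    assert old_style == new_style == {2, 5}

    pairs = [(b'a', 1), (b'b', 2)]
    assert dict((k, v) for k, v in pairs) == {k: v for k, v in pairs}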
@@ -654,7 +654,7 b' def releasenotes(ui, repo, file_=None, *' | |||
|
654 | 654 | opts = pycompat.byteskwargs(opts) |
|
655 | 655 | sections = releasenotessections(ui, repo) |
|
656 | 656 | |
|
657 | cmdutil.check_incompatible_arguments(opts, b'list', b'rev', b'check') | |
|
657 | cmdutil.check_incompatible_arguments(opts, b'list', [b'rev', b'check']) | |
|
658 | 658 | |
|
659 | 659 | if opts.get(b'list'): |
|
660 | 660 | return _getadmonitionlist(ui, sections) |
@@ -737,7 +737,7 b' def onetimeclientsetup(ui):' | |||
|
737 | 737 | # "link" is actually wrong here (it is set to len(changelog)) |
|
738 | 738 | # if changelog remains unchanged, skip writing file revisions |
|
739 | 739 | # but still do a sanity check about pending multiple revisions |
|
740 | if len(set(x[3] for x in pendingfilecommits)) > 1: | |
|
740 | if len({x[3] for x in pendingfilecommits}) > 1: | |
|
741 | 741 | raise error.ProgrammingError( |
|
742 | 742 | b'pending multiple integer revisions are not supported' |
|
743 | 743 | ) |
@@ -101,7 +101,7 b' class _cachebackedpacks(object):' | |||
|
101 | 101 | self._lastpack = pack |
|
102 | 102 | yield pack |
|
103 | 103 | |
|
104 | cachedpacks = set(pack for pack in self._lrucache) | |
|
104 | cachedpacks = {pack for pack in self._lrucache} | |
|
105 | 105 | # Yield for paths not in the cache. |
|
106 | 106 | for pack in self._packs - cachedpacks: |
|
107 | 107 | self._lastpack = pack |
@@ -259,7 +259,7 b' class basepackstore(object):' | |||
|
259 | 259 | newpacks = [] |
|
260 | 260 | if now > self.lastrefresh + REFRESHRATE: |
|
261 | 261 | self.lastrefresh = now |
|
262 | previous = set(p.path for p in self.packs) | |
|
262 | previous = {p.path for p in self.packs} | |
|
263 | 263 | for filepath, __, __ in self._getavailablepackfilessorted(): |
|
264 | 264 | if filepath not in previous: |
|
265 | 265 | newpack = self.getpack(filepath) |
@@ -300,7 +300,7 b' class manifestrevlogstore(object):' | |||
|
300 | 300 | |
|
301 | 301 | rl = self._revlog(name) |
|
302 | 302 | ancestors = {} |
|
303 | missing = set([node]) | |
|
303 | missing = {node} | |
|
304 | 304 | for ancrev in rl.ancestors([rl.rev(node)], inclusive=True): |
|
305 | 305 | ancnode = rl.node(ancrev) |
|
306 | 306 | missing.discard(ancnode) |
@@ -271,9 +271,9 b' class datapack(basepack.basepack):' | |||
|
271 | 271 | def cleanup(self, ledger): |
|
272 | 272 | entries = ledger.sources.get(self, []) |
|
273 | 273 | allkeys = set(self) |
|
274 | repackedkeys = set( | |
|
274 | repackedkeys = { | |
|
275 | 275 | (e.filename, e.node) for e in entries if e.datarepacked or e.gced |
|
276 | ) | |
|
276 | } | |
|
277 | 277 | |
|
278 | 278 | if len(allkeys - repackedkeys) == 0: |
|
279 | 279 | if self.path not in ledger.created: |
@@ -132,7 +132,7 b' class historypack(basepack.basepack):' | |||
|
132 | 132 | known = set() |
|
133 | 133 | section = self._findsection(name) |
|
134 | 134 | filename, offset, size, nodeindexoffset, nodeindexsize = section |
|
135 | pending = set([node]) | |
|
135 | pending = {node} | |
|
136 | 136 | o = 0 |
|
137 | 137 | while o < size: |
|
138 | 138 | if not pending: |
@@ -291,9 +291,9 b' class historypack(basepack.basepack):' | |||
|
291 | 291 | def cleanup(self, ledger): |
|
292 | 292 | entries = ledger.sources.get(self, []) |
|
293 | 293 | allkeys = set(self) |
|
294 | repackedkeys = set( | |
|
294 | repackedkeys = { | |
|
295 | 295 | (e.filename, e.node) for e in entries if e.historyrepacked |
|
296 | ) | |
|
296 | } | |
|
297 | 297 | |
|
298 | 298 | if len(allkeys - repackedkeys) == 0: |
|
299 | 299 | if self.path not in ledger.created: |
@@ -452,7 +452,7 b' class mutablehistorypack(basepack.mutabl' | |||
|
452 | 452 | sectionstart = self.packfp.tell() |
|
453 | 453 | |
|
454 | 454 | # Write the file section content |
|
455 | entrymap = dict((e[0], e) for e in entries) | |
|
455 | entrymap = {e[0]: e for e in entries} | |
|
456 | 456 | |
|
457 | 457 | def parentfunc(node): |
|
458 | 458 | x, p1, p2, x, x, x = entrymap[node] |
@@ -259,6 +259,10 b' class remotefilelog(object):' | |||
|
259 | 259 | |
|
260 | 260 | raise RuntimeError(b"len not supported") |
|
261 | 261 | |
|
262 | def heads(self): | |
|
263 | # Fake heads of the filelog to satisfy hgweb. | |
|
264 | return [] | |
|
265 | ||
|
262 | 266 | def empty(self): |
|
263 | 267 | return False |
|
264 | 268 | |
@@ -429,7 +433,7 b' class remotefilelog(object):' | |||
|
429 | 433 | return nullid |
|
430 | 434 | |
|
431 | 435 | revmap, parentfunc = self._buildrevgraph(a, b) |
|
432 | nodemap = dict((v, k) for (k, v) in pycompat.iteritems(revmap)) | |
|
436 | nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)} | |
|
433 | 437 | |
|
434 | 438 | ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b]) |
|
435 | 439 | if ancs: |
@@ -444,7 +448,7 b' class remotefilelog(object):' | |||
|
444 | 448 | return nullid |
|
445 | 449 | |
|
446 | 450 | revmap, parentfunc = self._buildrevgraph(a, b) |
|
447 | nodemap = dict((v, k) for (k, v) in pycompat.iteritems(revmap)) | |
|
451 | nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)} | |
|
448 | 452 | |
|
449 | 453 | ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b]) |
|
450 | 454 | return map(nodemap.__getitem__, ancs) |
@@ -321,7 +321,7 b' def _computeincrementalhistorypack(ui, f' | |||
|
321 | 321 | |
|
322 | 322 | def _allpackfileswithsuffix(files, packsuffix, indexsuffix): |
|
323 | 323 | result = [] |
|
324 | fileset = set(fn for fn, mode, stat in files) | |
|
324 | fileset = {fn for fn, mode, stat in files} | |
|
325 | 325 | for filename, mode, stat in files: |
|
326 | 326 | if not filename.endswith(packsuffix): |
|
327 | 327 | continue |
@@ -97,7 +97,7 b' class ShortRepository(object):' | |||
|
97 | 97 | parts = parts[:-1] |
|
98 | 98 | else: |
|
99 | 99 | tail = b'' |
|
100 | context = dict((b'%d' % (i + 1), v) for i, v in enumerate(parts)) | |
|
100 | context = {b'%d' % (i + 1): v for i, v in enumerate(parts)} | |
|
101 | 101 | return b''.join(self.templater.process(self.url, context)) + tail |
|
102 | 102 | |
|
103 | 103 |
@@ -246,7 +246,7 b' def _setupdirstate(ui):' | |||
|
246 | 246 | if changedfiles is not None: |
|
247 | 247 | # In _rebuild, these files will be deleted from the dirstate |
|
248 | 248 | # when they are not found to be in allfiles |
|
249 | dirstatefilestoremove = set(f for f in self if not matcher(f)) | |
|
249 | dirstatefilestoremove = {f for f in self if not matcher(f)} | |
|
250 | 250 | changedfiles = dirstatefilestoremove.union(changedfiles) |
|
251 | 251 | |
|
252 | 252 | return orig(self, parent, allfiles, changedfiles) |
@@ -228,7 +228,7 b' def stripcmd(ui, repo, *revs, **opts):' | |||
|
228 | 228 | for p in repo.dirstate.parents() |
|
229 | 229 | ) |
|
230 | 230 | |
|
231 | rootnodes = set(cl.node(r) for r in roots) | |
|
231 | rootnodes = {cl.node(r) for r in roots} | |
|
232 | 232 | |
|
233 | 233 | q = getattr(repo, 'mq', None) |
|
234 | 234 | if q is not None and q.applied: |
@@ -761,12 +761,12 b' def _dotransplant(ui, repo, *revs, **opt' | |||
|
761 | 761 | def checkopts(opts, revs): |
|
762 | 762 | if opts.get(b'continue'): |
|
763 | 763 | cmdutil.check_incompatible_arguments( |
|
764 | opts, b'continue', b'branch', b'all', b'merge' | |
|
764 | opts, b'continue', [b'branch', b'all', b'merge'] | |
|
765 | 765 | ) |
|
766 | 766 | return |
|
767 | 767 | if opts.get(b'stop'): |
|
768 | 768 | cmdutil.check_incompatible_arguments( |
|
769 | opts, b'stop', b'branch', b'all', b'merge' | |
|
769 | opts, b'stop', [b'branch', b'all', b'merge'] | |
|
770 | 770 | ) |
|
771 | 771 | return |
|
772 | 772 | if not ( |
@@ -840,10 +840,10 b' def _dotransplant(ui, repo, *revs, **opt' | |||
|
840 | 840 | |
|
841 | 841 | tf = tp.transplantfilter(repo, source, p1) |
|
842 | 842 | if opts.get(b'prune'): |
|
843 | prune = set( | |
|
843 | prune = { | |
|
844 | 844 | source[r].node() |
|
845 | 845 | for r in scmutil.revrange(source, opts.get(b'prune')) |
|
846 | ) | |
|
846 | } | |
|
847 | 847 | matchfn = lambda x: tf(x) and x not in prune |
|
848 | 848 | else: |
|
849 | 849 | matchfn = tf |
@@ -65,7 +65,7 b' def _commitfiltered(' | |||
|
65 | 65 | base = ctx.p1() |
|
66 | 66 | # ctx |
|
67 | 67 | initialfiles = set(ctx.files()) |
|
68 | exclude = set(f for f in initialfiles if match(f)) | |
|
68 | exclude = {f for f in initialfiles if match(f)} | |
|
69 | 69 | |
|
70 | 70 | # No files matched commit, so nothing excluded |
|
71 | 71 | if not exclude: |
@@ -78,9 +78,9 b' def _commitfiltered(' | |||
|
78 | 78 | files = initialfiles - exclude |
|
79 | 79 | # Filter copies |
|
80 | 80 | copied = copiesmod.pathcopies(base, ctx) |
|
81 | copied = dict( | |
|
82 | (dst, src) for dst, src in pycompat.iteritems(copied) if dst in files | |
|
83 | ) | |
|
81 | copied = { | |
|
82 | dst: src for dst, src in pycompat.iteritems(copied) if dst in files | |
|
83 | } | |
|
84 | 84 | |
|
85 | 85 | def filectxfn(repo, memctx, path, contentctx=ctx, redirect=()): |
|
86 | 86 | if path not in contentctx: |
@@ -722,8 +722,8 b' class POFile(_BaseFile):' | |||
|
722 | 722 | object POFile, the reference catalog. |
|
723 | 723 | """ |
|
724 | 724 | # Store entries in dict/set for faster access |
|
725 | self_entries = dict((entry.msgid, entry) for entry in self) | |
|
726 | refpot_msgids = set(entry.msgid for entry in refpot) | |
|
725 | self_entries = {entry.msgid: entry for entry in self} | |
|
726 | refpot_msgids = {entry.msgid for entry in refpot} | |
|
727 | 727 | # Merge entries that are in the refpot |
|
728 | 728 | for entry in refpot: |
|
729 | 729 | e = self_entries.get(entry.msgid) |
@@ -1808,9 +1808,9 b' class _MOFileParser(object):' | |||
|
1808 | 1808 | entry = self._build_entry( |
|
1809 | 1809 | msgid=msgid_tokens[0], |
|
1810 | 1810 | msgid_plural=msgid_tokens[1], |
|
1811 | msgstr_plural=dict( | |
|
1812 | (k, v) for k, v in enumerate(msgstr.split(b('\0'))) | |
|
1813 | ), | |
|
1811 | msgstr_plural={ | |
|
1812 | k: v for k, v in enumerate(msgstr.split(b('\0'))) | |
|
1813 | }, | |
|
1814 | 1814 | ) |
|
1815 | 1815 | else: |
|
1816 | 1816 | entry = self._build_entry(msgid=msgid, msgstr=msgstr) |
@@ -138,7 +138,7 b' def ancestors(pfunc, *orignodes):' | |||
|
138 | 138 | k = 0 |
|
139 | 139 | for i in interesting: |
|
140 | 140 | k |= i |
|
141 | return set(n for (i, n) in mapping if k & i) | |
|
141 | return {n for (i, n) in mapping if k & i} | |
|
142 | 142 | |
|
143 | 143 | gca = commonancestorsheads(pfunc, *orignodes) |
|
144 | 144 | |
@@ -393,39 +393,3 b' class lazyancestors(object):' | |||
|
393 | 393 | # free up memory. |
|
394 | 394 | self._containsiter = None |
|
395 | 395 | return False |
|
396 | ||
|
397 | ||
|
398 | class rustlazyancestors(object): | |
|
399 | def __init__(self, index, revs, stoprev=0, inclusive=False): | |
|
400 | self._index = index | |
|
401 | self._stoprev = stoprev | |
|
402 | self._inclusive = inclusive | |
|
403 | # no need to prefilter out init revs that are smaller than stoprev, | |
|
404 | # it's done by rustlazyancestors constructor. | |
|
405 | # we need to convert to a list, because our ruslazyancestors | |
|
406 | # constructor (from C code) doesn't understand anything else yet | |
|
407 | self._initrevs = initrevs = list(revs) | |
|
408 | ||
|
409 | self._containsiter = parsers.rustlazyancestors( | |
|
410 | index, initrevs, stoprev, inclusive | |
|
411 | ) | |
|
412 | ||
|
413 | def __nonzero__(self): | |
|
414 | """False if the set is empty, True otherwise. | |
|
415 | ||
|
416 | It's better to duplicate this essentially trivial method than | |
|
417 | to subclass lazyancestors | |
|
418 | """ | |
|
419 | try: | |
|
420 | next(iter(self)) | |
|
421 | return True | |
|
422 | except StopIteration: | |
|
423 | return False | |
|
424 | ||
|
425 | def __iter__(self): | |
|
426 | return parsers.rustlazyancestors( | |
|
427 | self._index, self._initrevs, self._stoprev, self._inclusive | |
|
428 | ) | |
|
429 | ||
|
430 | def __contains__(self, target): | |
|
431 | return target in self._containsiter |
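|
Note: the deleted rustlazyancestors wrapper is not a feature removal; the
Rust iterator is now provided through the policy/rustext layer with the same
duck-typed contract as lazyancestors (__bool__/__iter__/__contains__). A
minimal illustrative stand-in for that contract (toy class, not Mercurial
code):

    class fakelazyancestors(object):
        def __init__(self, revs):
            self._revs = set(revs)
        def __bool__(self):
            return bool(self._revs)
        __nonzero__ = __bool__          # Python 2 spelling
        def __iter__(self):
            return iter(sorted(self._revs, reverse=True))
        def __contains__(self, rev):
            return rev in self._revs

    la = fakelazyancestors([3, 1, 2])
    assert la and 2 in la and list(la) == [3, 2, 1]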
@@ -359,7 +359,7 b' def archive(' | |||
|
359 | 359 | if match(name): |
|
360 | 360 | write(name, 0o644, False, lambda: buildmetadata(ctx)) |
|
361 | 361 | |
|
362 | files = [f for f in ctx.manifest().matches(match)] | |
|
362 | files = list(ctx.manifest().walk(match)) | |
|
363 | 363 | total = len(files) |
|
364 | 364 | if total: |
|
365 | 365 | files.sort() |
@@ -173,6 +173,8 b' class bmstore(object):' | |||
|
173 | 173 | nrefs.sort() |
|
174 | 174 | |
|
175 | 175 | def _del(self, mark): |
|
176 | if mark not in self._refmap: | |
|
177 | return | |
|
176 | 178 | self._clean = False |
|
177 | 179 | node = self._refmap.pop(mark) |
|
178 | 180 | nrefs = self._nodemap[node] |
@@ -461,6 +463,10 b' def update(repo, parents, node):' | |||
|
461 | 463 | return bool(bmchanges) |
|
462 | 464 | |
|
463 | 465 | |
|
466 | def isdivergent(b): | |
|
467 | return b'@' in b and not b.endswith(b'@') | |
|
468 | ||
|
469 | ||
|
464 | 470 | def listbinbookmarks(repo): |
|
465 | 471 | # We may try to list bookmarks on a repo type that does not |
|
466 | 472 | # support it (e.g., statichttprepository). |
@@ -469,7 +475,7 b' def listbinbookmarks(repo):' | |||
|
469 | 475 | hasnode = repo.changelog.hasnode |
|
470 | 476 | for k, v in pycompat.iteritems(marks): |
|
471 | 477 | # don't expose local divergent bookmarks |
|
472 | if hasnode(v) and (b'@' not in k or k.endswith(b'@')): | |
|
478 | if hasnode(v) and not isdivergent(k): | |
|
473 | 479 | yield k, v |
|
474 | 480 | |
|
475 | 481 | |
@@ -481,6 +487,8 b' def listbookmarks(repo):' | |||
|
481 | 487 | |
|
482 | 488 | |
|
483 | 489 | def pushbookmark(repo, key, old, new): |
|
490 | if isdivergent(key): | |
|
491 | return False | |
|
484 | 492 | if bookmarksinstore(repo): |
|
485 | 493 | wlock = util.nullcontextmanager() |
|
486 | 494 | else: |
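|
Note: the new isdivergent() helper names the rule that listbinbookmarks()
previously spelled inline: an '@' inside the name marks a divergent bookmark
unless it is the trailing character, and such bookmarks are now also refused
by pushbookmark() and the bundle2 handler. A quick illustration of the
predicate:

    def isdivergent(b):
        return b'@' in b and not b.endswith(b'@')

    assert isdivergent(b'feature@remote')   # divergent copy, now rejected
    assert not isdivergent(b'feature')      # ordinary bookmark
    assert not isdivergent(b'feature@')     # a trailing '@' alone is fine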
@@ -291,8 +291,8 b' class branchcache(object):' | |||
|
291 | 291 | % ( |
|
292 | 292 | _branchcachedesc(repo), |
|
293 | 293 | pycompat.bytestr( |
|
294 | inst  # pytype: disable=wrong-arg-types | |
|
295 | ), | |
|
294 | inst | |
|
295 | ), # pytype: disable=wrong-arg-types | |
|
296 | 296 | ) |
|
297 | 297 | ) |
|
298 | 298 | bcache = None |
@@ -446,7 +446,7 b' class branchcache(object):' | |||
|
446 | 446 | # 1 (branch a) -> 2 (branch b) -> 3 (branch a) |
|
447 | 447 | for branch, newheadrevs in pycompat.iteritems(newbranches): |
|
448 | 448 | bheads = self._entries.setdefault(branch, []) |
|
449 | bheadset = set(cl.rev(node) for node in bheads) | |
|
449 | bheadset = {cl.rev(node) for node in bheads} | |
|
450 | 450 | |
|
451 | 451 | # This have been tested True on all internal usage of this function. |
|
452 | 452 | # run it again in case of doubt |
@@ -582,7 +582,7 b' class revbranchcache(object):' | |||
|
582 | 582 | |
|
583 | 583 | @util.propertycache |
|
584 | 584 | def _namesreverse(self): |
|
585 | return dict((b, r) for r, b in enumerate(self._names)) | |
|
585 | return {b: r for r, b in enumerate(self._names)} | |
|
586 | 586 | |
|
587 | 587 | def branchinfo(self, rev): |
|
588 | 588 | """Return branch name and close flag for rev, using and updating |
@@ -2368,6 +2368,11 b' def handlebookmark(op, inpart):' | |||
|
2368 | 2368 | b'prepushkey', throw=True, **pycompat.strkwargs(hookargs) |
|
2369 | 2369 | ) |
|
2370 | 2370 | |
|
2371 | for book, node in changes: | |
|
2372 | if bookmarks.isdivergent(book): | |
|
2373 | msg = _(b'cannot accept divergent bookmark %s!') % book | |
|
2374 | raise error.Abort(msg) | |
|
2375 | ||
|
2371 | 2376 | bookstore.applychanges(op.repo, op.gettransaction(), changes) |
|
2372 | 2377 | |
|
2373 | 2378 | if pushkeycompat: |
@@ -53,21 +53,35 b' static PyObject *nodeof(line *l)' | |||
|
53 | 53 | { |
|
54 | 54 | char *s = l->start; |
|
55 | 55 | Py_ssize_t llen = pathlen(l); |
|
56 | Py_ssize_t hlen = l->len - llen - 2; | |
|
57 | Py_ssize_t hlen_raw = 20; | |
|
56 | 58 | PyObject *hash; |
|
57 | 59 | if (llen + 1 + 40 + 1 > l->len) { /* path '\0' hash '\n' */ |
|
58 | 60 | PyErr_SetString(PyExc_ValueError, "manifest line too short"); |
|
59 | 61 | return NULL; |
|
60 | 62 | } |
|
61 | hash = unhexlify(s + llen + 1, 40); | |
|
63 | switch (hlen) { | |
|
64 | case 40: /* sha1 */ | |
|
65 | case 41: /* sha1 with cruft for a merge */ | |
|
66 | break; | |
|
67 | case 64: /* new hash */ | |
|
68 | case 65: /* new hash with cruft for a merge */ | |
|
69 | hlen_raw = 32; | |
|
70 | break; | |
|
71 | default: | |
|
72 | PyErr_SetString(PyExc_ValueError, "invalid node length in manifest"); | |
|
73 | return NULL; | |
|
74 | } | |
|
75 | hash = unhexlify(s + llen + 1, hlen_raw * 2); | |
|
62 | 76 | if (!hash) { |
|
63 | 77 | return NULL; |
|
64 | 78 | } |
|
65 | 79 | if (l->hash_suffix != '\0') { |
|
66 | char newhash[21]; | |
|
67 | memcpy(newhash, PyBytes_AsString(hash), 20); | |
|
80 | char newhash[33]; | |
|
81 | memcpy(newhash, PyBytes_AsString(hash), hlen_raw); | |
|
68 | 82 | Py_DECREF(hash); |
|
69 | newhash[20] = l->hash_suffix; | |
|
70 | hash = PyBytes_FromStringAndSize(newhash, 21); | |
|
83 | newhash[hlen_raw] = l->hash_suffix; | |
|
84 | hash = PyBytes_FromStringAndSize(newhash, hlen_raw+1); | |
|
71 | 85 | } |
|
72 | 86 | return hash; |
|
73 | 87 | } |
@@ -78,15 +92,20 b' static PyObject *hashflags(line *l)' | |||
|
78 | 92 | char *s = l->start; |
|
79 | 93 | Py_ssize_t plen = pathlen(l); |
|
80 | 94 | PyObject *hash = nodeof(l); |
|
81 | ||
|
82 | /* 40 for hash, 1 for null byte, 1 for newline */ | |
|
83 | Py_ssize_t hplen = plen + 42; | |
|
84 | Py_ssize_t flen = l->len - hplen; | |
|
95 | ssize_t hlen; | |
|
96 | Py_ssize_t hplen, flen; | |
|
85 | 97 | PyObject *flags; |
|
86 | 98 | PyObject *tup; |
|
87 | 99 | |
|
88 | 100 | if (!hash) |
|
89 | 101 | return NULL; |
|
102 | /* hash is either 20 or 21 bytes for an old hash, so we use a | |
|
103 | ternary here to get the "real" hexlified sha length. */ | |
|
104 | hlen = PyBytes_GET_SIZE(hash) < 22 ? 40 : 64; | |
|
105 | /* 1 for null byte, 1 for newline */ | |
|
106 | hplen = plen + hlen + 2; | |
|
107 | flen = l->len - hplen; | |
|
108 | ||
|
90 | 109 | flags = PyBytes_FromStringAndSize(s + hplen - 1, flen); |
|
91 | 110 | if (!flags) { |
|
92 | 111 | Py_DECREF(hash); |
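|
Note: the manifest.c hunks above teach nodeof()/hashflags() that a manifest
line "<path>\0<hex node>[cruft]\n" may now carry a 64-character hash (32 raw
bytes) in addition to the 40-character sha1. A hedged Python rendering of the
same length arithmetic (illustration only, not Mercurial code):

    def rawhashlen(line):
        path, _sep, _rest = line.partition(b'\0')
        hlen = len(line) - len(path) - 2   # minus the '\0' and the '\n'
        if hlen in (40, 41):               # sha1, optionally with merge cruft
            return 20
        if hlen in (64, 65):               # new 32-byte hash, same convention
            return 32
        raise ValueError('invalid node length in manifest')

    assert rawhashlen(b'foo\0' + b'a' * 40 + b'\n') == 20
    assert rawhashlen(b'foo\0' + b'b' * 64 + b'\n') == 32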
@@ -810,9 +810,10 b' static PyObject *setprocname(PyObject *s' | |||
|
810 | 810 | /* Check the memory we can use. Typically, argv[i] and |
|
811 | 811 | * argv[i + 1] are continuous. */ |
|
812 | 812 | for (i = 0; i < argc; ++i) { |
|
813 | size_t len; | |
|
813 | 814 | if (argv[i] > argvend || argv[i] < argvstart) |
|
814 | 815 | break; /* not continuous */ |
|
815 | size_t len = strlen(argv[i]); | |
|
816 | len = strlen(argv[i]); | |
|
816 | 817 | argvend = argv[i] + len + 1 /* '\0' */; |
|
817 | 818 | } |
|
818 | 819 | if (argvend > argvstart) /* sanity check */ |
@@ -1169,10 +1170,10 b' static PyObject *getfsmountpoint(PyObjec' | |||
|
1169 | 1170 | static PyObject *unblocksignal(PyObject *self, PyObject *args) |
|
1170 | 1171 | { |
|
1171 | 1172 | int sig = 0; |
|
1173 | sigset_t set; | |
|
1172 | 1174 | int r; |
|
1173 | 1175 | if (!PyArg_ParseTuple(args, "i", &sig)) |
|
1174 | 1176 | return NULL; |
|
1175 | sigset_t set; | |
|
1176 | 1177 | r = sigemptyset(&set); |
|
1177 | 1178 | if (r != 0) |
|
1178 | 1179 | return PyErr_SetFromErrno(PyExc_OSError); |
@@ -39,6 +39,8 b' typedef struct {' | |||
|
39 | 39 | |
|
40 | 40 | typedef struct { |
|
41 | 41 | int abi_version; |
|
42 | Py_ssize_t (*index_length)(const indexObject *); | |
|
43 | const char *(*index_node)(indexObject *, Py_ssize_t); | |
|
42 | 44 | int (*index_parents)(PyObject *, int, int *); |
|
43 | 45 | } Revlog_CAPI; |
|
44 | 46 | |
@@ -212,7 +214,7 b' static inline int index_get_parents(inde' | |||
|
212 | 214 | * |
|
213 | 215 | * Returns 0 on success or -1 on failure. |
|
214 | 216 | */ |
|
215 | int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps) | |
|
217 | static int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps) | |
|
216 | 218 | { |
|
217 | 219 | int tiprev; |
|
218 | 220 | if (!op || !HgRevlogIndex_Check(op) || !ps) { |
@@ -2878,173 +2880,12 b' bail:' | |||
|
2878 | 2880 | return NULL; |
|
2879 | 2881 | } |
|
2880 | 2882 | |
|
2881 | #ifdef WITH_RUST | |
|
2882 | ||
|
2883 | /* rustlazyancestors: iteration over ancestors implemented in Rust | |
|
2884 | * | |
|
2885 | * This class holds a reference to an index and to the Rust iterator. | |
|
2886 | */ | |
|
2887 | typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject; | |
|
2888 | ||
|
2889 | struct rustlazyancestorsObjectStruct { | |
|
2890 | PyObject_HEAD | |
|
2891 | /* Type-specific fields go here. */ | |
|
2892 | indexObject *index; /* Ref kept to avoid GC'ing the index */ | |
|
2893 | void *iter; /* Rust iterator */ | |
|
2894 | }; | |
|
2895 | ||
|
2896 | /* FFI exposed from Rust code */ | |
|
2897 | rustlazyancestorsObject *rustlazyancestors_init(indexObject *index, | |
|
2898 | /* intrevs vector */ | |
|
2899 | Py_ssize_t initrevslen, | |
|
2900 | long *initrevs, long stoprev, | |
|
2901 | int inclusive); | |
|
2902 | void rustlazyancestors_drop(rustlazyancestorsObject *self); | |
|
2903 | int rustlazyancestors_next(rustlazyancestorsObject *self); | |
|
2904 | int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev); | |
|
2905 | ||
|
2906 | /* CPython instance methods */ | |
|
2907 | static int rustla_init(rustlazyancestorsObject *self, PyObject *args) | |
|
2908 | { | |
|
2909 | PyObject *initrevsarg = NULL; | |
|
2910 | PyObject *inclusivearg = NULL; | |
|
2911 | long stoprev = 0; | |
|
2912 | long *initrevs = NULL; | |
|
2913 | int inclusive = 0; | |
|
2914 | Py_ssize_t i; | |
|
2915 | ||
|
2916 | indexObject *index; | |
|
2917 | if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index, | |
|
2918 | &PyList_Type, &initrevsarg, &stoprev, | |
|
2919 | &PyBool_Type, &inclusivearg)) | |
|
2920 | return -1; | |
|
2921 | ||
|
2922 | Py_INCREF(index); | |
|
2923 | self->index = index; | |
|
2924 | ||
|
2925 | if (inclusivearg == Py_True) | |
|
2926 | inclusive = 1; | |
|
2927 | ||
|
2928 | Py_ssize_t linit = PyList_GET_SIZE(initrevsarg); | |
|
2929 | ||
|
2930 | initrevs = (long *)calloc(linit, sizeof(long)); | |
|
2931 | ||
|
2932 | if (initrevs == NULL) { | |
|
2933 | PyErr_NoMemory(); | |
|
2934 | goto bail; | |
|
2935 | } | |
|
2936 | ||
|
2937 | for (i = 0; i < linit; i++) { | |
|
2938 | initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i)); | |
|
2939 | } | |
|
2940 | if (PyErr_Occurred()) | |
|
2941 | goto bail; | |
|
2942 | ||
|
2943 | self->iter = | |
|
2944 | rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive); | |
|
2945 | if (self->iter == NULL) { | |
|
2946 | /* if this is because of GraphError::ParentOutOfRange | |
|
2947 | * HgRevlogIndex_GetParents() has already set the proper | |
|
2948 | * exception */ | |
|
2949 | goto bail; | |
|
2950 | } | |
|
2951 | ||
|
2952 | free(initrevs); | |
|
2953 | return 0; | |
|
2954 | ||
|
2955 | bail: | |
|
2956 | free(initrevs); | |
|
2957 | return -1; | |
|
2958 | }; | |
|
2959 | ||
|
2960 | static void rustla_dealloc(rustlazyancestorsObject *self) | |
|
2961 | { | |
|
2962 | Py_XDECREF(self->index); | |
|
2963 | if (self->iter != NULL) { /* can happen if rustla_init failed */ | |
|
2964 | rustlazyancestors_drop(self->iter); | |
|
2965 | } | |
|
2966 | PyObject_Del(self); | |
|
2967 | } | |
|
2968 | ||
|
2969 | static PyObject *rustla_next(rustlazyancestorsObject *self) | |
|
2970 | { | |
|
2971 | int res = rustlazyancestors_next(self->iter); | |
|
2972 | if (res == -1) { | |
|
2973 | /* Setting an explicit exception seems unnecessary | |
|
2974 | * as examples from Python source code (Objects/rangeobjets.c | |
|
2975 | * and Modules/_io/stringio.c) seem to demonstrate. | |
|
2976 | */ | |
|
2977 | return NULL; | |
|
2978 | } | |
|
2979 | return PyInt_FromLong(res); | |
|
2980 | } | |
|
2981 | ||
|
2982 | static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev) | |
|
2983 | { | |
|
2984 | long lrev; | |
|
2985 | if (!pylong_to_long(rev, &lrev)) { | |
|
2986 | PyErr_Clear(); | |
|
2987 | return 0; | |
|
2988 | } | |
|
2989 | return rustlazyancestors_contains(self->iter, lrev); | |
|
2990 | } | |
|
2991 | ||
|
2992 | static PySequenceMethods rustla_sequence_methods = { | |
|
2993 | 0, /* sq_length */ | |
|
2994 | 0, /* sq_concat */ | |
|
2995 | 0, /* sq_repeat */ | |
|
2996 | 0, /* sq_item */ | |
|
2997 | 0, /* sq_slice */ | |
|
2998 | 0, /* sq_ass_item */ | |
|
2999 | 0, /* sq_ass_slice */ | |
|
3000 | (objobjproc)rustla_contains, /* sq_contains */ | |
|
3001 | }; | |
|
3002 | ||
|
3003 | static PyTypeObject rustlazyancestorsType = { | |
|
3004 | PyVarObject_HEAD_INIT(NULL, 0) /* header */ | |
|
3005 | "parsers.rustlazyancestors", /* tp_name */ | |
|
3006 | sizeof(rustlazyancestorsObject), /* tp_basicsize */ | |
|
3007 | 0, /* tp_itemsize */ | |
|
3008 | (destructor)rustla_dealloc, /* tp_dealloc */ | |
|
3009 | 0, /* tp_print */ | |
|
3010 | 0, /* tp_getattr */ | |
|
3011 | 0, /* tp_setattr */ | |
|
3012 | 0, /* tp_compare */ | |
|
3013 | 0, /* tp_repr */ | |
|
3014 | 0, /* tp_as_number */ | |
|
3015 | &rustla_sequence_methods, /* tp_as_sequence */ | |
|
3016 | 0, /* tp_as_mapping */ | |
|
3017 | 0, /* tp_hash */ | |
|
3018 | 0, /* tp_call */ | |
|
3019 | 0, /* tp_str */ | |
|
3020 | 0, /* tp_getattro */ | |
|
3021 | 0, /* tp_setattro */ | |
|
3022 | 0, /* tp_as_buffer */ | |
|
3023 | Py_TPFLAGS_DEFAULT, /* tp_flags */ | |
|
3024 | "Iterator over ancestors, implemented in Rust", /* tp_doc */ | |
|
3025 | 0, /* tp_traverse */ | |
|
3026 | 0, /* tp_clear */ | |
|
3027 | 0, /* tp_richcompare */ | |
|
3028 | 0, /* tp_weaklistoffset */ | |
|
3029 | 0, /* tp_iter */ | |
|
3030 | (iternextfunc)rustla_next, /* tp_iternext */ | |
|
3031 | 0, /* tp_methods */ | |
|
3032 | 0, /* tp_members */ | |
|
3033 | 0, /* tp_getset */ | |
|
3034 | 0, /* tp_base */ | |
|
3035 | 0, /* tp_dict */ | |
|
3036 | 0, /* tp_descr_get */ | |
|
3037 | 0, /* tp_descr_set */ | |
|
3038 | 0, /* tp_dictoffset */ | |
|
3039 | (initproc)rustla_init, /* tp_init */ | |
|
3040 | 0, /* tp_alloc */ | |
|
3041 | }; | |
|
3042 | #endif /* WITH_RUST */ | |
|
3043 | ||
|
3044 | 2883 | static Revlog_CAPI CAPI = { |
|
3045 | 2884 | /* increment the abi_version field upon each change in the Revlog_CAPI |
|
3046 | 2885 | struct or in the ABI of the listed functions */ |
|
3047 | 1, | |
|
2886 | 2, | |
|
2887 | index_length, | |
|
2888 | index_node, | |
|
3048 | 2889 | HgRevlogIndex_GetParents, |
|
3049 | 2890 | }; |
|
3050 | 2891 | |
@@ -3074,13 +2915,4 b' void revlog_module_init(PyObject *mod)' | |||
|
3074 | 2915 | caps = PyCapsule_New(&CAPI, "mercurial.cext.parsers.revlog_CAPI", NULL); |
|
3075 | 2916 | if (caps != NULL) |
|
3076 | 2917 | PyModule_AddObject(mod, "revlog_CAPI", caps); |
|
3077 | ||
|
3078 | #ifdef WITH_RUST | |
|
3079 | rustlazyancestorsType.tp_new = PyType_GenericNew; | |
|
3080 | if (PyType_Ready(&rustlazyancestorsType) < 0) | |
|
3081 | return; | |
|
3082 | Py_INCREF(&rustlazyancestorsType); | |
|
3083 | PyModule_AddObject(mod, "rustlazyancestors", | |
|
3084 | (PyObject *)&rustlazyancestorsType); | |
|
3085 | #endif | |
|
3086 | 2918 | } |
@@ -14,6 +14,4 b' extern PyTypeObject HgRevlogIndex_Type;' | |||
|
14 | 14 | |
|
15 | 15 | #define HgRevlogIndex_Check(op) PyObject_TypeCheck(op, &HgRevlogIndex_Type) |
|
16 | 16 | |
|
17 | int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps); | |
|
18 | ||
|
19 | 17 | #endif /* _HG_REVLOG_H_ */ |
@@ -993,7 +993,7 b' class cgpacker(object):' | |||
|
993 | 993 | ] |
|
994 | 994 | |
|
995 | 995 | manifests.clear() |
|
996 | clrevs = set(cl.rev(x) for x in clnodes) | |
|
996 | clrevs = {cl.rev(x) for x in clnodes} | |
|
997 | 997 | |
|
998 | 998 | it = self.generatefiles( |
|
999 | 999 | changedfiles, |
@@ -1149,8 +1149,8 b' class cgpacker(object):' | |||
|
1149 | 1149 | if fastpathlinkrev: |
|
1150 | 1150 | assert not tree |
|
1151 | 1151 | return ( |
|
1152 | manifests.__getitem__  # pytype: disable=unsupported-operands | |
|
1153 | ) | |
|
1152 | manifests.__getitem__ | |
|
1153 | ) # pytype: disable=unsupported-operands | |
|
1154 | 1154 | |
|
1155 | 1155 | def lookupmflinknode(x): |
|
1156 | 1156 | """Callback for looking up the linknode for manifests. |
@@ -1282,9 +1282,7 b' class cgpacker(object):' | |||
|
1282 | 1282 | flinkrev = store.linkrev |
|
1283 | 1283 | fnode = store.node |
|
1284 | 1284 | revs = ((r, flinkrev(r)) for r in store) |
|
1285 | return dict( | |
|
1286 | (fnode(r), cln(lr)) for r, lr in revs if lr in clrevs | |
|
1287 | ) | |
|
1285 | return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs} | |
|
1288 | 1286 | |
|
1289 | 1287 | clrevtolocalrev = {} |
|
1290 | 1288 |
@@ -161,15 +161,18 b' class appender(object):' | |||
|
161 | 161 | return self.fp.__exit__(*args) |
|
162 | 162 | |
|
163 | 163 | |
|
164 | def _divertopener(opener, target): | |
|
165 | """build an opener that writes in 'target.a' instead of 'target'""" | |
|
164 | class _divertopener(object): | |
|
165 | def __init__(self, opener, target): | |
|
166 | self._opener = opener | |
|
167 | self._target = target | |
|
166 | 168 | |
|
167 | def _divert(name, mode=b'r', checkambig=False, **kwargs): | |
|
168 | if name != target: | |
|
169 | return opener(name, mode, **kwargs) | |
|
170 | return opener(name + b".a", mode, **kwargs) | |
|
169 | def __call__(self, name, mode=b'r', checkambig=False, **kwargs): | |
|
170 | if name != self._target: | |
|
171 | return self._opener(name, mode, **kwargs) | |
|
172 | return self._opener(name + b".a", mode, **kwargs) | |
|
171 | 173 | |
|
172 | return _divert | |
|
174 | def __getattr__(self, attr): | |
|
175 | return getattr(self._opener, attr) | |
|
173 | 176 | |
|
174 | 177 | |
|
175 | 178 | def _delayopener(opener, target, buf): |
@@ -382,6 +385,9 b' class changelog(revlog.revlog):' | |||
|
382 | 385 | datafile=datafile, |
|
383 | 386 | checkambig=True, |
|
384 | 387 | mmaplargeindex=True, |
|
388 | persistentnodemap=opener.options.get( | |
|
389 | b'exp-persistent-nodemap', False | |
|
390 | ), | |
|
385 | 391 | ) |
|
386 | 392 | |
|
387 | 393 | if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1): |
@@ -80,9 +80,12 b' def _hashlist(items):' | |||
|
80 | 80 | # sensitive config sections affecting confighash |
|
81 | 81 | _configsections = [ |
|
82 | 82 | b'alias', # affects global state commands.table |
|
83 | b'diff-tools', # affects whether gui or not in extdiff's uisetup | |
|
83 | 84 | b'eol', # uses setconfig('eol', ...) |
|
84 | 85 | b'extdiff', # uisetup will register new commands |
|
85 | 86 | b'extensions', |
|
87 | b'fastannotate', # affects annotate command and adds fastannonate cmd | |
|
88 | b'merge-tools', # affects whether gui or not in extdiff's uisetup | |
|
86 | 89 | b'schemes', # extsetup will update global hg.schemes |
|
87 | 90 | ] |
|
88 | 91 | |
@@ -525,7 +528,7 b' class chgcmdserver(commandserver.server)' | |||
|
525 | 528 | def _setumask(self, data): |
|
526 | 529 | mask = struct.unpack(b'>I', data)[0] |
|
527 | 530 | self.ui.log(b'chgserver', b'setumask %r\n', mask) |
|
528 | os.umask(mask) | |
|
531 | util.setumask(mask) | |
|
529 | 532 | |
|
530 | 533 | def runcommand(self): |
|
531 | 534 | # pager may be attached within the runcommand session, which should |
@@ -551,40 +554,6 b' class chgcmdserver(commandserver.server)' | |||
|
551 | 554 | raise ValueError(b'unexpected value in setenv request') |
|
552 | 555 | self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys())) |
|
553 | 556 | |
|
554 | # Python3 has some logic to "coerce" the C locale to a UTF-8 capable | |
|
555 | # one, and it sets LC_CTYPE in the environment to C.UTF-8 if none of | |
|
556 | # 'LC_CTYPE', 'LC_ALL' or 'LANG' are set (to any value). This can be | |
|
557 | # disabled with PYTHONCOERCECLOCALE=0 in the environment. | |
|
558 | # | |
|
559 | # When fromui is called via _inithashstate, python has already set | |
|
560 | # this, so that's in the environment right when we start up the hg | |
|
561 | # process. Then chg will call us and tell us to set the environment to | |
|
562 | # the one it has; this might NOT have LC_CTYPE, so we'll need to | |
|
563 | # carry-forward the LC_CTYPE that was coerced in these situations. | |
|
564 | # | |
|
565 | # If this is not handled, we will fail config+env validation and fail | |
|
566 | # to start chg. If this is just ignored instead of carried forward, we | |
|
567 | # may have different behavior between chg and non-chg. | |
|
568 | if pycompat.ispy3: | |
|
569 | # Rename for wordwrapping purposes | |
|
570 | oldenv = encoding.environ | |
|
571 | if not any( | |
|
572 | e.get(b'PYTHONCOERCECLOCALE') == b'0' for e in [oldenv, newenv] | |
|
573 | ): | |
|
574 | keys = [b'LC_CTYPE', b'LC_ALL', b'LANG'] | |
|
575 | old_keys = [k for k, v in oldenv.items() if k in keys and v] | |
|
576 | new_keys = [k for k, v in newenv.items() if k in keys and v] | |
|
577 | # If the user's environment (from chg) doesn't have ANY of the | |
|
578 | # keys that python looks for, and the environment (from | |
|
579 | # initialization) has ONLY LC_CTYPE and it's set to C.UTF-8, | |
|
580 | # carry it forward. | |
|
581 | if ( | |
|
582 | not new_keys | |
|
583 | and old_keys == [b'LC_CTYPE'] | |
|
584 | and oldenv[b'LC_CTYPE'] == b'C.UTF-8' | |
|
585 | ): | |
|
586 | newenv[b'LC_CTYPE'] = oldenv[b'LC_CTYPE'] | |
|
587 | ||
|
588 | 557 | encoding.environ.clear() |
|
589 | 558 | encoding.environ.update(newenv) |
|
590 | 559 | |
@@ -731,6 +700,16 b' def chgunixservice(ui, repo, opts):' | |||
|
731 | 700 | # environ cleaner. |
|
732 | 701 | if b'CHGINTERNALMARK' in encoding.environ: |
|
733 | 702 | del encoding.environ[b'CHGINTERNALMARK'] |
|
703 | # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if | |
|
704 | # it thinks the current value is "C". This breaks the hash computation and | |
|
705 | # causes chg to restart loop. | |
|
706 | if b'CHGORIG_LC_CTYPE' in encoding.environ: | |
|
707 | encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE'] | |
|
708 | del encoding.environ[b'CHGORIG_LC_CTYPE'] | |
|
709 | elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ: | |
|
710 | if b'LC_CTYPE' in encoding.environ: | |
|
711 | del encoding.environ[b'LC_CTYPE'] | |
|
712 | del encoding.environ[b'CHG_CLEAR_LC_CTYPE'] | |
|
734 | 713 | |
|
735 | 714 | if repo: |
|
736 | 715 | # one chgserver can serve multiple repos. drop repo information |
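|
Note: the deleted Py3 locale-coercion workaround is replaced by a small
client/server handshake: chg stashes the pre-coercion locale in
CHGORIG_LC_CTYPE (or requests removal via CHG_CLEAR_LC_CTYPE) and the server
replays it, so the config/environment hash stays stable and chg stops
restart-looping. A toy model of the restore step with assumed values:

    env = {b'CHGORIG_LC_CTYPE': b'en_US.UTF-8', b'LC_CTYPE': b'C.UTF-8'}
    if b'CHGORIG_LC_CTYPE' in env:
        env[b'LC_CTYPE'] = env.pop(b'CHGORIG_LC_CTYPE')
    elif b'CHG_CLEAR_LC_CTYPE' in env:
        env.pop(b'LC_CTYPE', None)
        del env[b'CHG_CLEAR_LC_CTYPE']
    assert env == {b'LC_CTYPE': b'en_US.UTF-8'}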
@@ -170,7 +170,12 b' logopts = [' | |||
|
170 | 170 | |
|
171 | 171 | diffopts = [ |
|
172 | 172 | (b'a', b'text', None, _(b'treat all files as text')), |
|
173 | (b'g', b'git', None, _(b'use git extended diff format')), | |
|
173 | ( | |
|
174 | b'g', | |
|
175 | b'git', | |
|
176 | None, | |
|
177 | _(b'use git extended diff format (DEFAULT: diff.git)'), | |
|
178 | ), | |
|
174 | 179 | (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')), |
|
175 | 180 | (b'', b'nodates', None, _(b'omit dates from diff headers')), |
|
176 | 181 | ] |
@@ -209,7 +214,9 b' diffopts2 = (' | |||
|
209 | 214 | b'p', |
|
210 | 215 | b'show-function', |
|
211 | 216 | None, |
|
212 | _(b'show which function each change is in'), | |
|
217 | _( | |
|
218 | b'show which function each change is in (DEFAULT: diff.showfunc)' | |
|
219 | ), | |
|
213 | 220 | ), |
|
214 | 221 | (b'', b'reverse', None, _(b'produce a diff that undoes the changes')), |
|
215 | 222 | ] |
@@ -281,11 +288,11 b' def check_at_most_one_arg(opts, *args):' | |||
|
281 | 288 | return previous |
|
282 | 289 | |
|
283 | 290 | |
|
284 | def check_incompatible_arguments(opts, first, *others): | |
|
291 | def check_incompatible_arguments(opts, first, others): | |
|
285 | 292 | """abort if the first argument is given along with any of the others |
|
286 | 293 | |
|
287 | 294 | Unlike check_at_most_one_arg(), `others` are not mutually exclusive |
|
288 | among themselves. | |
|
295 | among themselves, and they're passed as a single collection. | |
|
289 | 296 | """ |
|
290 | 297 | for other in others: |
|
291 | 298 | check_at_most_one_arg(opts, first, other) |
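|
Note: after this signature change, callers pass the options that conflict
with `first` as one collection instead of unpacked varargs, which is what the
later hunks in commands.py, releasenotes and transplant adapt to. A
self-contained sketch of the new calling convention (simplified error type):

    def check_at_most_one_arg(opts, *args):
        previous = None
        for x in args:
            if opts.get(x):
                if previous:
                    raise ValueError('cannot combine %s and %s' % (previous, x))
                previous = x
        return previous

    def check_incompatible_arguments(opts, first, others):
        for other in others:
            check_at_most_one_arg(opts, first, other)

    # ok: only 'list' is set, so nothing conflicts
    check_incompatible_arguments({b'list': True, b'rev': None}, b'list', [b'rev', b'check'])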
@@ -584,15 +591,8 b' def dorecord(' | |||
|
584 | 591 | [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles] |
|
585 | 592 | # 3a. apply filtered patch to clean repo (clean) |
|
586 | 593 | if backups: |
|
587 | # Equivalent to hg.revert | |
|
588 | 594 | m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore) |
|
589 | mergemod.update( | |
|
590 | repo, | |
|
591 | repo.dirstate.p1(), | |
|
592 | branchmerge=False, | |
|
593 | force=True, | |
|
594 | matcher=m, | |
|
595 | ) | |
|
595 | mergemod.revert_to(repo[b'.'], matcher=m) | |
|
596 | 596 | |
|
597 | 597 | # 3b. (apply) |
|
598 | 598 | if dopatch: |
@@ -1414,46 +1414,165 b' def openrevlog(repo, cmd, file_, opts):' | |||
|
1414 | 1414 | |
|
1415 | 1415 | |
|
1416 | 1416 | def copy(ui, repo, pats, opts, rename=False): |
|
1417 | check_incompatible_arguments(opts, b'forget', [b'dry_run']) | |
|
1418 | ||
|
1417 | 1419 | # called with the repo lock held |
|
1418 | 1420 | # |
|
1419 | 1421 | # hgsep => pathname that uses "/" to separate directories |
|
1420 | 1422 | # ossep => pathname that uses os.sep to separate directories |
|
1421 | 1423 | cwd = repo.getcwd() |
|
1422 | 1424 | targets = {} |
|
1425 | forget = opts.get(b"forget") | |
|
1423 | 1426 | after = opts.get(b"after") |
|
1424 | 1427 | dryrun = opts.get(b"dry_run") |
|
1425 | wctx = repo[None] | |
|
1428 | rev = opts.get(b'at_rev') | |
|
1429 | if rev: | |
|
1430 | if not forget and not after: | |
|
1431 | # TODO: Remove this restriction and make it also create the copy | |
|
1432 | # targets (and remove the rename source if rename==True). | |
|
1433 | raise error.Abort(_(b'--at-rev requires --after')) | |
|
1434 | ctx = scmutil.revsingle(repo, rev) | |
|
1435 | if len(ctx.parents()) > 1: | |
|
1436 | raise error.Abort(_(b'cannot mark/unmark copy in merge commit')) | |
|
1437 | else: | |
|
1438 | ctx = repo[None] | |
|
1439 | ||
|
1440 | pctx = ctx.p1() | |
|
1426 | 1441 | |
|
1427 | 1442 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) |
|
1428 | 1443 | |
|
1444 | if forget: | |
|
1445 | if ctx.rev() is None: | |
|
1446 | new_ctx = ctx | |
|
1447 | else: | |
|
1448 | if len(ctx.parents()) > 1: | |
|
1449 | raise error.Abort(_(b'cannot unmark copy in merge commit')) | |
|
1450 | # avoid cycle context -> subrepo -> cmdutil | |
|
1451 | from . import context | |
|
1452 | ||
|
1453 | rewriteutil.precheck(repo, [ctx.rev()], b'uncopy') | |
|
1454 | new_ctx = context.overlayworkingctx(repo) | |
|
1455 | new_ctx.setbase(ctx.p1()) | |
|
1456 | mergemod.graft(repo, ctx, wctx=new_ctx) | |
|
1457 | ||
|
1458 | match = scmutil.match(ctx, pats, opts) | |
|
1459 | ||
|
1460 | current_copies = ctx.p1copies() | |
|
1461 | current_copies.update(ctx.p2copies()) | |
|
1462 | ||
|
1463 | uipathfn = scmutil.getuipathfn(repo) | |
|
1464 | for f in ctx.walk(match): | |
|
1465 | if f in current_copies: | |
|
1466 | new_ctx[f].markcopied(None) | |
|
1467 | elif match.exact(f): | |
|
1468 | ui.warn( | |
|
1469 | _( | |
|
1470 | b'%s: not unmarking as copy - file is not marked as copied\n' | |
|
1471 | ) | |
|
1472 | % uipathfn(f) | |
|
1473 | ) | |
|
1474 | ||
|
1475 | if ctx.rev() is not None: | |
|
1476 | with repo.lock(): | |
|
1477 | mem_ctx = new_ctx.tomemctx_for_amend(ctx) | |
|
1478 | new_node = mem_ctx.commit() | |
|
1479 | ||
|
1480 | if repo.dirstate.p1() == ctx.node(): | |
|
1481 | with repo.dirstate.parentchange(): | |
|
1482 | scmutil.movedirstate(repo, repo[new_node]) | |
|
1483 | replacements = {ctx.node(): [new_node]} | |
|
1484 | scmutil.cleanupnodes( | |
|
1485 | repo, replacements, b'uncopy', fixphase=True | |
|
1486 | ) | |
|
1487 | ||
|
1488 | return | |
|
1489 | ||
|
1490 | pats = scmutil.expandpats(pats) | |
|
1491 | if not pats: | |
|
1492 | raise error.Abort(_(b'no source or destination specified')) | |
|
1493 | if len(pats) == 1: | |
|
1494 | raise error.Abort(_(b'no destination specified')) | |
|
1495 | dest = pats.pop() | |
|
1496 | ||
|
1429 | 1497 | def walkpat(pat): |
|
1430 | 1498 | srcs = [] |
|
1431 | if after: | |
|
1432 | badstates = b'?' | |
|
1433 | else: | |
|
1434 | badstates = b'?r' | |
|
1435 | m = scmutil.match(wctx, [pat], opts, globbed=True) | |
|
1436 | for abs in wctx.walk(m): | |
|
1437 | state = repo.dirstate[abs] | |
|
1499 | m = scmutil.match(ctx, [pat], opts, globbed=True) | |
|
1500 | for abs in ctx.walk(m): | |
|
1438 | 1501 | rel = uipathfn(abs) |
|
1439 | 1502 | exact = m.exact(abs) |
|
1440 | if state in badstates: | |
|
1441 | if exact and state == b'?': | |
|
1442 | ui.warn(_(b'%s: not copying - file is not managed\n') % rel) | |
|
1443 | if exact and state == b'r': | |
|
1503 | if abs not in ctx: | |
|
1504 | if abs in pctx: | |
|
1505 | if not after: | |
|
1506 | if exact: | |
|
1444 | 1507 | ui.warn( |
|
1445 | 1508 | _( |
|
1446 | b'%s: not copying - file has been marked for' | |
|
1447 | b' remove\n' | |
|
1509 | b'%s: not copying - file has been marked ' | |
|
1510 | b'for remove\n' | |
|
1448 | 1511 | ) |
|
1449 | 1512 | % rel |
|
1450 | 1513 | ) |
|
1451 | 1514 | continue |
|
1515 | else: | |
|
1516 | if exact: | |
|
1517 | ui.warn( | |
|
1518 | _(b'%s: not copying - file is not managed\n') % rel | |
|
1519 | ) | |
|
1520 | continue | |
|
1521 | ||
|
1452 | 1522 | # abs: hgsep |
|
1453 | 1523 | # rel: ossep |
|
1454 | 1524 | srcs.append((abs, rel, exact)) |
|
1455 | 1525 | return srcs |
|
1456 | 1526 | |
|
1527 | if ctx.rev() is not None: | |
|
1528 | rewriteutil.precheck(repo, [ctx.rev()], b'uncopy') | |
|
1529 | absdest = pathutil.canonpath(repo.root, cwd, dest) | |
|
1530 | if ctx.hasdir(absdest): | |
|
1531 | raise error.Abort( | |
|
1532 | _(b'%s: --at-rev does not support a directory as destination') | |
|
1533 | % uipathfn(absdest) | |
|
1534 | ) | |
|
1535 | if absdest not in ctx: | |
|
1536 | raise error.Abort( | |
|
1537 | _(b'%s: copy destination does not exist in %s') | |
|
1538 | % (uipathfn(absdest), ctx) | |
|
1539 | ) | |
|
1540 | ||
|
1541 | # avoid cycle context -> subrepo -> cmdutil | |
|
1542 | from . import context | |
|
1543 | ||
|
1544 | copylist = [] | |
|
1545 | for pat in pats: | |
|
1546 | srcs = walkpat(pat) | |
|
1547 | if not srcs: | |
|
1548 | continue | |
|
1549 | for abs, rel, exact in srcs: | |
|
1550 | copylist.append(abs) | |
|
1551 | ||
|
1552 | # TODO: Add support for `hg cp --at-rev . foo bar dir` and | |
|
1553 | # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the | |
|
1554 | # existing functions below. | |
|
1555 | if len(copylist) != 1: | |
|
1556 | raise error.Abort(_(b'--at-rev requires a single source')) | |
|
1557 | ||
|
1558 | new_ctx = context.overlayworkingctx(repo) | |
|
1559 | new_ctx.setbase(ctx.p1()) | |
|
1560 | mergemod.graft(repo, ctx, wctx=new_ctx) | |
|
1561 | ||
|
1562 | new_ctx.markcopied(absdest, copylist[0]) | |
|
1563 | ||
|
1564 | with repo.lock(): | |
|
1565 | mem_ctx = new_ctx.tomemctx_for_amend(ctx) | |
|
1566 | new_node = mem_ctx.commit() | |
|
1567 | ||
|
1568 | if repo.dirstate.p1() == ctx.node(): | |
|
1569 | with repo.dirstate.parentchange(): | |
|
1570 | scmutil.movedirstate(repo, repo[new_node]) | |
|
1571 | replacements = {ctx.node(): [new_node]} | |
|
1572 | scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True) | |
|
1573 | ||
|
1574 | return | |
|
1575 | ||
|
1457 | 1576 | # abssrc: hgsep |
|
1458 | 1577 | # relsrc: ossep |
|
1459 | 1578 | # otarget: ossep |
@@ -1583,13 +1702,13 b' def copy(ui, repo, pats, opts, rename=Fa' | |||
|
1583 | 1702 | |
|
1584 | 1703 | # fix up dirstate |
|
1585 | 1704 | scmutil.dirstatecopy( |
|
1586 | ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd | |
|
1705 | ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd | |
|
1587 | 1706 | ) |
|
1588 | 1707 | if rename and not dryrun: |
|
1589 | 1708 | if not after and srcexists and not samefile: |
|
1590 | 1709 | rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs') |
|
1591 | 1710 | repo.wvfs.unlinkpath(abssrc, rmdir=rmdir) |
|
1592 | wctx.forget([abssrc]) | |
|
1711 | ctx.forget([abssrc]) | |
|
1593 | 1712 | |
|
1594 | 1713 | # pat: ossep |
|
1595 | 1714 | # dest ossep |
@@ -1659,12 +1778,6 b' def copy(ui, repo, pats, opts, rename=Fa' | |||
|
1659 | 1778 | res = lambda p: dest |
|
1660 | 1779 | return res |
|
1661 | 1780 | |
|
1662 | pats = scmutil.expandpats(pats) | |
|
1663 | if not pats: | |
|
1664 | raise error.Abort(_(b'no source or destination specified')) | |
|
1665 | if len(pats) == 1: | |
|
1666 | raise error.Abort(_(b'no destination specified')) | |
|
1667 | dest = pats.pop() | |
|
1668 | 1781 | destdirexists = os.path.isdir(dest) and not os.path.islink(dest) |
|
1669 | 1782 | if not destdirexists: |
|
1670 | 1783 | if len(pats) > 1 or matchmod.patkind(pats[0]): |
@@ -3012,7 +3125,7 b' def amend(ui, repo, old, extra, pats, op' | |||
|
3012 | 3125 | ms = mergemod.mergestate.read(repo) |
|
3013 | 3126 | mergeutil.checkunresolved(ms) |
|
3014 | 3127 | |
|
3015 | filestoamend = set(f for f in wctx.files() if matcher(f)) | |
|
3128 | filestoamend = {f for f in wctx.files() if matcher(f)} | |
|
3016 | 3129 | |
|
3017 | 3130 | changes = len(filestoamend) > 0 |
|
3018 | 3131 | if changes: |
@@ -3804,7 +3917,7 b' def _performrevert(' | |||
|
3804 | 3917 | # Apply changes |
|
3805 | 3918 | fp = stringio() |
|
3806 | 3919 | # chunks are serialized per file, but files aren't sorted |
|
3807 | for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))): | |
|
3920 | for f in sorted({c.header.filename() for c in chunks if ishunk(c)}): | |
|
3808 | 3921 | prntstatusmsg(b'revert', f) |
|
3809 | 3922 | files = set() |
|
3810 | 3923 | for c in chunks: |
@@ -44,7 +44,7 b' try:' | |||
|
44 | 44 | b'cyan': (False, curses.COLOR_CYAN, b''), |
|
45 | 45 | b'white': (False, curses.COLOR_WHITE, b''), |
|
46 | 46 | } |
|
47 | except ImportError: | |
|
47 | except (ImportError, AttributeError): | |
|
48 | 48 | curses = None |
|
49 | 49 | _baseterminfoparams = {} |
|
50 | 50 |
@@ -876,7 +876,7 b' def _dobackout(ui, repo, node=None, rev=' | |||
|
876 | 876 | ) |
|
877 | 877 | overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')} |
|
878 | 878 | with ui.configoverride(overrides, b'backout'): |
|
879 | return hg.merge(repo, hex(repo.changelog.tip())) | |
|
879 | return hg.merge(repo[b'tip']) | |
|
880 | 880 | return 0 |
|
881 | 881 | |
|
882 | 882 | |
@@ -1228,7 +1228,7 b' def bookmark(ui, repo, *names, **opts):' | |||
|
1228 | 1228 | |
|
1229 | 1229 | action = cmdutil.check_at_most_one_arg(opts, b'delete', b'rename', b'list') |
|
1230 | 1230 | if action: |
|
1231 | cmdutil.check_incompatible_arguments(opts, action, b'rev') | |
|
1231 | cmdutil.check_incompatible_arguments(opts, action, [b'rev']) | |
|
1232 | 1232 | elif names or rev: |
|
1233 | 1233 | action = b'add' |
|
1234 | 1234 | elif inactive: |
@@ -1236,7 +1236,9 b' def bookmark(ui, repo, *names, **opts):' | |||
|
1236 | 1236 | else: |
|
1237 | 1237 | action = b'list' |
|
1238 | 1238 | |
|
1239 | cmdutil.check_incompatible_arguments(opts, b'inactive', b'delete', b'list') | |
|
1239 | cmdutil.check_incompatible_arguments( | |
|
1240 | opts, b'inactive', [b'delete', b'list'] | |
|
1241 | ) | |
|
1240 | 1242 | if not names and action in {b'add', b'delete'}: |
|
1241 | 1243 | raise error.Abort(_(b"bookmark name required")) |
|
1242 | 1244 | |
@@ -2307,8 +2309,16 b' def continuecmd(ui, repo, **opts):' | |||
|
2307 | 2309 | @command( |
|
2308 | 2310 | b'copy|cp', |
|
2309 | 2311 | [ |
|
2312 | (b'', b'forget', None, _(b'unmark a file as copied')), | |
|
2310 | 2313 | (b'A', b'after', None, _(b'record a copy that has already occurred')), |
|
2311 | 2314 | ( |
|
2315 | b'', | |
|
2316 | b'at-rev', | |
|
2317 | b'', | |
|
2318 | _(b'(un)mark copies in the given revision (EXPERIMENTAL)'), | |
|
2319 | _(b'REV'), | |
|
2320 | ), | |
|
2321 | ( | |
|
2312 | 2322 | b'f', |
|
2313 | 2323 | b'force', |
|
2314 | 2324 | None, |
@@ -2331,8 +2341,11 b' def copy(ui, repo, *pats, **opts):' | |||
|
2331 | 2341 | exist in the working directory. If invoked with -A/--after, the |
|
2332 | 2342 | operation is recorded, but no copying is performed. |
|
2333 | 2343 | |
|
2334 | This command takes effect with the next commit. To undo a copy | |
|
2335 | before that, see :hg:`revert`. | |
|
2344 | To undo marking a file as copied, use --forget. With that option, | |
|
2345 | all given (positional) arguments are unmarked as copies. The destination | |
|
2346 | file(s) will be left in place (still tracked). | |
|
2347 | ||
|
2348 | This command takes effect with the next commit by default. | |
|
2336 | 2349 | |
|
2337 | 2350 | Returns 0 on success, 1 if errors are encountered. |
|
2338 | 2351 | """ |
@@ -2938,7 +2951,7 b' def graft(ui, repo, *revs, **opts):' | |||
|
2938 | 2951 | |
|
2939 | 2952 | See :hg:`help revisions` for more about specifying revisions. |
|
2940 | 2953 | |
|
2941 | Returns 0 on successful completion. | |
|
2954 | Returns 0 on successful completion, 1 if there are unresolved files. | |
|
2942 | 2955 | ''' |
|
2943 | 2956 | with repo.wlock(): |
|
2944 | 2957 | return _dograft(ui, repo, *revs, **opts) |
@@ -3199,10 +3212,9 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3199 | 3212 | statedata[b'nodes'] = nodes |
|
3200 | 3213 | stateversion = 1 |
|
3201 | 3214 | graftstate.save(stateversion, statedata) |
|
3202 | hint = _(b"use 'hg resolve' and 'hg graft --continue'") | |
|
3203 | raise error.Abort( | |
|
3204 | _(b"unresolved conflicts, can't continue"), hint=hint | |
|
3205 | ) | |
|
3215 | ui.error(_(b"abort: unresolved conflicts, can't continue\n")) | |
|
3216 | ui.error(_(b"(use 'hg resolve' and 'hg graft --continue')\n")) | |
|
3217 | return 1 | |
|
3206 | 3218 | else: |
|
3207 | 3219 | cont = False |
|
3208 | 3220 | |
@@ -3708,9 +3720,9 b' def heads(ui, repo, *branchrevs, **opts)' | |||
|
3708 | 3720 | heads = [repo[h] for h in heads] |
|
3709 | 3721 | |
|
3710 | 3722 | if branchrevs: |
|
3711 | branches = set( | |
|
3723 | branches = { | |
|
3712 | 3724 | repo[r].branch() for r in scmutil.revrange(repo, branchrevs) |
|
3713 | ) | |
|
3725 | } | |
|
3714 | 3726 | heads = [h for h in heads if h.branch() in branches] |
|
3715 | 3727 | |
|
3716 | 3728 | if opts.get(b'active') and branchrevs: |
@@ -3718,7 +3730,7 b' def heads(ui, repo, *branchrevs, **opts)' | |||
|
3718 | 3730 | heads = [h for h in heads if h.node() in dagheads] |
|
3719 | 3731 | |
|
3720 | 3732 | if branchrevs: |
|
3721 | haveheads = set(h.branch() for h in heads) | |
|
3733 | haveheads = {h.branch() for h in heads} | |
|
3722 | 3734 | if branches - haveheads: |
|
3723 | 3735 | headless = b', '.join(b for b in branches - haveheads) |
|
3724 | 3736 | msg = _(b'no open branch heads found on branches %s') |
@@ -4847,6 +4859,7 b' def merge(ui, repo, node=None, **opts):' | |||
|
4847 | 4859 | abort = opts.get(b'abort') |
|
4848 | 4860 | if abort and repo.dirstate.p2() == nullid: |
|
4849 | 4861 | cmdutil.wrongtooltocontinue(repo, _(b'merge')) |
|
4862 | cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview']) | |
|
4850 | 4863 | if abort: |
|
4851 | 4864 | state = cmdutil.getunfinishedstate(repo) |
|
4852 | 4865 | if state and state._opname != b'merge': |
@@ -4856,19 +4869,16 b' def merge(ui, repo, node=None, **opts):' | |||
|
4856 | 4869 | ) |
|
4857 | 4870 | if node: |
|
4858 | 4871 | raise error.Abort(_(b"cannot specify a node with --abort")) |
|
4859 | if opts.get(b'rev'): | |
|
4860 | raise error.Abort(_(b"cannot specify both --rev and --abort")) | |
|
4861 | if opts.get(b'preview'): | |
|
4862 | raise error.Abort(_(b"cannot specify --preview with --abort")) | |
|
4872 | return hg.abortmerge(repo.ui, repo) | |
|
4873 | ||
|
4863 | 4874 | if opts.get(b'rev') and node: |
|
4864 | 4875 | raise error.Abort(_(b"please specify just one revision")) |
|
4865 | 4876 | if not node: |
|
4866 | 4877 | node = opts.get(b'rev') |
|
4867 | 4878 | |
|
4868 | 4879 | if node: |
|
4869 | node = scmutil.revsingle(repo, node).node() | |
|
4870 | ||
|
4871 | if not node and not abort: | |
|
4880 | ctx = scmutil.revsingle(repo, node) | |
|
4881 | else: | |
|
4872 | 4882 | if ui.configbool(b'commands', b'merge.require-rev'): |
|
4873 | 4883 | raise error.Abort( |
|
4874 | 4884 | _( |
@@ -4876,12 +4886,15 b' def merge(ui, repo, node=None, **opts):' | |||
|
4876 | 4886 | b'with' |
|
4877 | 4887 | ) |
|
4878 | 4888 | ) |
|
4879 | node = repo[destutil.destmerge(repo)].node() | |
|
4889 | ctx = repo[destutil.destmerge(repo)] | |
|
4890 | ||
|
4891 | if ctx.node() is None: | |
|
4892 | raise error.Abort(_(b'merging with the working copy has no effect')) | |
|
4880 | 4893 | |
|
4881 | 4894 | if opts.get(b'preview'): |
|
4882 | 4895 | # find nodes that are ancestors of p2 but not of p1 |
|
4883 | p1 = repo[b'.'].node() | |
|
4884 | p2 = node | |
|
4896 | p1 = repo[b'.'].node() | |
|
4897 | p2 = ctx.node() | |
|
4885 | 4898 | nodes = repo.changelog.findmissing(common=[p1], heads=[p2]) |
|
4886 | 4899 | |
|
4887 | 4900 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
@@ -4895,14 +4908,7 b' def merge(ui, repo, node=None, **opts):' | |||
|
4895 | 4908 | with ui.configoverride(overrides, b'merge'): |
|
4896 | 4909 | force = opts.get(b'force') |
|
4897 | 4910 | labels = [b'working copy', b'merge rev'] |
|
4898 | return hg.merge( | |
|
4899 | repo, | |
|
4900 | node, | |
|
4901 | force=force, | |
|
4902 | mergeforce=force, | |
|
4903 | labels=labels, | |
|
4904 | abort=abort, | |
|
4905 | ) | |
|
4911 | return hg.merge(ctx, force=force, labels=labels) | |
|
4906 | 4912 | |
|
4907 | 4913 | |
|
4908 | 4914 | statemod.addunfinished( |
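Net effect of the merge() hunks above: the revision is resolved to a changectx once, up front, and hg.merge() no longer takes node/mergeforce/abort arguments. A call-site sketch (the repo object and the revision spec are illustrative):

    from mercurial import hg, scmutil

    ctx = scmutil.revsingle(repo, b'default')  # resolve the revision once
    hg.merge(ctx, force=False, labels=[b'working copy', b'merge rev'])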
@@ -5337,6 +5343,7 b' def postincoming(ui, repo, modheads, opt' | |||
|
5337 | 5343 | None, |
|
5338 | 5344 | _(b'run even when remote repository is unrelated'), |
|
5339 | 5345 | ), |
|
5346 | (b'', b'confirm', None, _(b'confirm pull before applying changes'),), | |
|
5340 | 5347 | ( |
|
5341 | 5348 | b'r', |
|
5342 | 5349 | b'rev', |
@@ -5453,6 +5460,7 b' def pull(ui, repo, source=b"default", **' | |||
|
5453 | 5460 | force=opts.get(b'force'), |
|
5454 | 5461 | bookmarks=opts.get(b'bookmark', ()), |
|
5455 | 5462 | opargs=pullopargs, |
|
5463 | confirm=opts.get(b'confirm'), | |
|
5456 | 5464 | ).cgresult |
|
5457 | 5465 | |
|
5458 | 5466 | # brev is a name, which might be a bookmark to be activated at |
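The new --confirm flag is backed by the pull.confirm config item added in the configitems hunk further down. How such a confirmation gate is typically wired, sketched (the prompt wording is assumed, not taken from this diff):

    from mercurial import error
    from mercurial.i18n import _

    # sketch: ask before applying pulled changes
    if opts.get(b'confirm') or repo.ui.configbool(b'pull', b'confirm'):
        if repo.ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
            raise error.Abort(_(b'user aborted'))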
@@ -5671,7 +5679,7 b' def push(ui, repo, dest=None, **opts):' | |||
|
5671 | 5679 | |
|
5672 | 5680 | @command( |
|
5673 | 5681 | b'recover', |
|
5674 | [(b'', b'verify', True, b"run `hg verify` after successful recover"),], | |
|
5682 | [(b'', b'verify', False, b"run `hg verify` after successful recover"),], | |
|
5675 | 5683 | helpcategory=command.CATEGORY_MAINTENANCE, |
|
5676 | 5684 | ) |
|
5677 | 5685 | def recover(ui, repo, **opts): |
@@ -5946,6 +5954,8 b' def resolve(ui, repo, *pats, **opts):' | |||
|
5946 | 5954 | if not m(f): |
|
5947 | 5955 | continue |
|
5948 | 5956 | |
|
5957 | if ms[f] == mergemod.MERGE_RECORD_MERGED_OTHER: | |
|
5958 | continue | |
|
5949 | 5959 | label, key = mergestateinfo[ms[f]] |
|
5950 | 5960 | fm.startitem() |
|
5951 | 5961 | fm.context(ctx=wctx) |
@@ -5993,6 +6003,9 b' def resolve(ui, repo, *pats, **opts):' | |||
|
5993 | 6003 | |
|
5994 | 6004 | didwork = True |
|
5995 | 6005 | |
|
6006 | if ms[f] == mergemod.MERGE_RECORD_MERGED_OTHER: | |
|
6007 | continue | |
|
6008 | ||
|
5996 | 6009 | # don't let driver-resolved files be marked, and run the conclude |
|
5997 | 6010 | # step if asked to resolve |
|
5998 | 6011 | if ms[f] == mergemod.MERGE_RECORD_DRIVER_RESOLVED: |
@@ -6648,7 +6661,12 b' def shelve(ui, repo, *pats, **opts):' | |||
|
6648 | 6661 | (b'i', b'ignored', None, _(b'show only ignored files')), |
|
6649 | 6662 | (b'n', b'no-status', None, _(b'hide status prefix')), |
|
6650 | 6663 | (b't', b'terse', _NOTTERSE, _(b'show the terse output (EXPERIMENTAL)')), |
|
6651 | (b'C', b'copies', None, _(b'show source of copied files')), | |
|
6664 | ( | |
|
6665 | b'C', | |
|
6666 | b'copies', | |
|
6667 | None, | |
|
6668 | _(b'show source of copied files (DEFAULT: ui.statuscopies)'), | |
|
6669 | ), | |
|
6652 | 6670 | ( |
|
6653 | 6671 | b'0', |
|
6654 | 6672 | b'print0', |
@@ -7571,7 +7589,7 b' def unshelve(ui, repo, *shelved, **opts)' | |||
|
7571 | 7589 | unshelved. |
|
7572 | 7590 | """ |
|
7573 | 7591 | with repo.wlock(): |
|
7574 | return shelvemod.dounshelve(ui, repo, *shelved, **opts) | |
|
7592 | return shelvemod.unshelvecmd(ui, repo, *shelved, **opts) | |
|
7575 | 7593 | |
|
7576 | 7594 | |
|
7577 | 7595 | statemod.addunfinished( |
@@ -7653,6 +7671,7 b' def update(ui, repo, node=None, **opts):' | |||
|
7653 | 7671 | |
|
7654 | 7672 | Returns 0 on success, 1 if there are unresolved files. |
|
7655 | 7673 | """ |
|
7674 | cmdutil.check_at_most_one_arg(opts, 'clean', 'check', 'merge') | |
|
7656 | 7675 | rev = opts.get('rev') |
|
7657 | 7676 | date = opts.get('date') |
|
7658 | 7677 | clean = opts.get('clean') |
@@ -7674,14 +7693,6 b' def update(ui, repo, node=None, **opts):' | |||
|
7674 | 7693 | if date and rev is not None: |
|
7675 | 7694 | raise error.Abort(_(b"you can't specify a revision and a date")) |
|
7676 | 7695 | |
|
7677 | if len([x for x in (clean, check, merge) if x]) > 1: | |
|
7678 | raise error.Abort( | |
|
7679 | _( | |
|
7680 | b"can only specify one of -C/--clean, -c/--check, " | |
|
7681 | b"or -m/--merge" | |
|
7682 | ) | |
|
7683 | ) | |
|
7684 | ||
|
7685 | 7696 | updatecheck = None |
|
7686 | 7697 | if check: |
|
7687 | 7698 | updatecheck = b'abort' |
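As in the merge hunk earlier, an open-coded exclusivity check (removed above) becomes a single cmdutil.check_at_most_one_arg() call. A plausible shape for that helper, sketched:

    # sketch only; the real cmdutil helper may differ
    from mercurial import error, pycompat
    from mercurial.i18n import _

    def check_at_most_one_arg(opts, *args):
        previous = None
        for x in args:
            if opts.get(x):
                if previous:
                    raise error.Abort(
                        _(b'cannot specify both --%s and --%s')
                        % (pycompat.sysbytes(previous), pycompat.sysbytes(x))
                    )
                previous = x
        return previous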
@@ -545,6 +545,10 b' class unixforkingservice(object):' | |||
|
545 | 545 | if maxlen < 0: |
|
546 | 546 | raise error.Abort(_(b'negative max-repo-cache size not allowed')) |
|
547 | 547 | self._repoloader = repocache.repoloader(ui, maxlen) |
|
548 | # attempt to avoid crash in CoreFoundation when using chg after fix in | |
|
549 | # a89381e04c58 | |
|
550 | if pycompat.isdarwin: | |
|
551 | procutil.gui() | |
|
548 | 552 | |
|
549 | 553 | def init(self): |
|
550 | 554 | self._sock = socket.socket(socket.AF_UNIX) |
@@ -405,6 +405,21 b' coreconfigitem(' | |||
|
405 | 405 | coreconfigitem( |
|
406 | 406 | b'devel', b'legacy.exchange', default=list, |
|
407 | 407 | ) |
|
408 | # TODO before getting `persistent-nodemap` out of experimental | |
|
409 | # | |
|
410 | # * decide for a "status" of the persistent nodemap and associated location | |
|
411 | # - part of the store next to the revlog itself (new requirements) | 
|
412 | # - part of the cache directory | |
|
413 | # - part of an `index` directory | |
|
414 | # (https://www.mercurial-scm.org/wiki/ComputedIndexPlan) | |
|
415 | # * do we want to use this for more than just changelog? if so we need: | |
|
416 | # - simpler "pending" logic for them | |
|
417 | # - double check the memory story (we don't want to keep all revlogs in memory) | 
|
418 | # - think about the naming scheme if we are in "cache" | |
|
419 | # * increment the version format to "1" and freeze it. | |
|
420 | coreconfigitem( | |
|
421 | b'devel', b'persistent-nodemap', default=False, | |
|
422 | ) | |
|
408 | 423 | coreconfigitem( |
|
409 | 424 | b'devel', b'servercafile', default=b'', |
|
410 | 425 | ) |
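A sketch of how the new knobs would be consulted (assumes a repo object; exp-persistent-nodemap.mmap is added in the experimental section below):

    want_nodemap = repo.ui.configbool(b'devel', b'persistent-nodemap')
    use_mmap = repo.ui.configbool(
        b'experimental', b'exp-persistent-nodemap.mmap'
    )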
@@ -660,6 +675,12 b' coreconfigitem(' | |||
|
660 | 675 | b'experimental', b'rust.index', default=False, |
|
661 | 676 | ) |
|
662 | 677 | coreconfigitem( |
|
678 | b'experimental', b'exp-persistent-nodemap', default=False, | |
|
679 | ) | |
|
680 | coreconfigitem( | |
|
681 | b'experimental', b'exp-persistent-nodemap.mmap', default=True, | |
|
682 | ) | |
|
683 | coreconfigitem( | |
|
663 | 684 | b'experimental', b'server.filesdata.recommended-batch-size', default=50000, |
|
664 | 685 | ) |
|
665 | 686 | coreconfigitem( |
@@ -750,7 +771,7 b' coreconfigitem(' | |||
|
750 | 771 | coreconfigitem( |
|
751 | 772 | b'format', |
|
752 | 773 | b'revlog-compression', |
|
753 | default=b'zlib', | |
|
774 | default=lambda: [b'zlib'], | |
|
754 | 775 | alias=[(b'experimental', b'format.compression')], |
|
755 | 776 | ) |
|
756 | 777 | coreconfigitem( |
@@ -1044,6 +1065,9 b' coreconfigitem(' | |||
|
1044 | 1065 | b'progress', b'width', default=dynamicdefault, |
|
1045 | 1066 | ) |
|
1046 | 1067 | coreconfigitem( |
|
1068 | b'pull', b'confirm', default=False, | |
|
1069 | ) | |
|
1070 | coreconfigitem( | |
|
1047 | 1071 | b'push', b'pushvars.server', default=False, |
|
1048 | 1072 | ) |
|
1049 | 1073 | coreconfigitem( |
@@ -1107,7 +1131,7 b' coreconfigitem(' | |||
|
1107 | 1131 | b'server', b'compressionengines', default=list, |
|
1108 | 1132 | ) |
|
1109 | 1133 | coreconfigitem( |
|
1110 | b'server', b'concurrent-push-mode', default=b'strict', | |
|
1134 | b'server', b'concurrent-push-mode', default=b'check-related', | |
|
1111 | 1135 | ) |
|
1112 | 1136 | coreconfigitem( |
|
1113 | 1137 | b'server', b'disablefullbundle', default=False, |
@@ -267,7 +267,7 b' class basectx(object):' | |||
|
267 | 267 | def _fileinfo(self, path): |
|
268 | 268 | if '_manifest' in self.__dict__: |
|
269 | 269 | try: |
|
270 | return self._manifest[path], self._manifest.flags(path) | |
|
270 | return self._manifest.find(path) | |
|
271 | 271 | except KeyError: |
|
272 | 272 | raise error.ManifestLookupError( |
|
273 | 273 | self._node, path, _(b'not found in manifest') |
@@ -2357,8 +2357,7 b' class overlayworkingctx(committablectx):' | |||
|
2357 | 2357 | # Test the other direction -- that this path from p2 isn't a directory |
|
2358 | 2358 | # in p1 (test that p1 doesn't have any paths matching `path/*`). |
|
2359 | 2359 | match = self.match([path], default=b'path') |
|
2360 | matches = self.p1().manifest().matches(match) | |
|
2361 | mfiles = matches.keys() | |
|
2360 | mfiles = list(self.p1().manifest().walk(match)) | |
|
2362 | 2361 | if len(mfiles) > 0: |
|
2363 | 2362 | if len(mfiles) == 1 and mfiles[0] == path: |
|
2364 | 2363 | return |
@@ -2488,6 +2487,17 b' class overlayworkingctx(committablectx):' | |||
|
2488 | 2487 | editor=editor, |
|
2489 | 2488 | ) |
|
2490 | 2489 | |
|
2490 | def tomemctx_for_amend(self, precursor): | |
|
2491 | extra = precursor.extra().copy() | |
|
2492 | extra[b'amend_source'] = precursor.hex() | |
|
2493 | return self.tomemctx( | |
|
2494 | text=precursor.description(), | |
|
2495 | branch=precursor.branch(), | |
|
2496 | extra=extra, | |
|
2497 | date=precursor.date(), | |
|
2498 | user=precursor.user(), | |
|
2499 | ) | |
|
2500 | ||
|
2491 | 2501 | def isdirty(self, path): |
|
2492 | 2502 | return path in self._cache |
|
2493 | 2503 |
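A minimal usage sketch for the new tomemctx_for_amend() helper (hypothetical caller such as an in-memory amend; repo and the overlay context wctx are assumed to exist):

    pred = repo[b'.']                    # the commit being amended
    memctx = wctx.tomemctx_for_amend(pred)
    newnode = repo.commitctx(memctx)     # commit the memctx like any context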
@@ -403,13 +403,15 b' def pathcopies(x, y, match=None):' | |||
|
403 | 403 | ) |
|
404 | 404 | if x == y or not x or not y: |
|
405 | 405 | return {} |
|
406 | if y.rev() is None and x == y.p1(): | |
|
407 | if debug: | |
|
408 | repo.ui.debug(b'debug.copies: search mode: dirstate\n') | |
|
409 | # short-circuit to avoid issues with merge states | |
|
410 | return _dirstatecopies(repo, match) | |
|
406 | 411 | a = y.ancestor(x) |
|
407 | 412 | if a == x: |
|
408 | 413 | if debug: |
|
409 | 414 | repo.ui.debug(b'debug.copies: search mode: forward\n') |
|
410 | if y.rev() is None and x == y.p1(): | |
|
411 | # short-circuit to avoid issues with merge states | |
|
412 | return _dirstatecopies(repo, match) | |
|
413 | 415 | copies = _forwardcopies(x, y, match=match) |
|
414 | 416 | elif a == y: |
|
415 | 417 | if debug: |
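The dirstate fast path moves ahead of the ancestor computation because hg copy/hg rename already record copy information in the dirstate, so no manifest comparison is needed for "parent -> working directory". The idea in isolation, sketched (assumes a repo object):

    wctx = repo[None]                  # working-directory context
    if wctx.p1() == repo[b'.']:
        copies = dict(repo.dirstate.copies())  # dst -> src, straight from dirstate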
@@ -452,44 +454,34 b' def mergecopies(repo, c1, c2, base):' | |||
|
452 | 454 | |
|
453 | 455 | ```other changed <file> which local deleted``` |
|
454 | 456 | |
|
455 | Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and | |
|
456 | "dirmove". | |
|
457 | Returns a tuple where: | |
|
457 | 458 | |
|
458 | "copy" is a mapping from destination name -> source name, | |
|
459 | where source is in c1 and destination is in c2 or vice-versa. | |
|
460 | ||
|
461 | "movewithdir" is a mapping from source name -> destination name, | |
|
462 | where the file at source present in one context but not the other | |
|
463 | needs to be moved to destination by the merge process, because the | |
|
464 | other context moved the directory it is in. | |
|
459 | "branch_copies" an instance of branch_copies. | |
|
465 | 460 | |
|
466 | 461 | "diverge" is a mapping of source name -> list of destination names |
|
467 | 462 | for divergent renames. |
|
468 | 463 | |
|
469 | "renamedelete" is a mapping of source name -> list of destination | |
|
470 | names for files deleted in c1 that were renamed in c2 or vice-versa. | |
|
471 | ||
|
472 | "dirmove" is a mapping of detected source dir -> destination dir renames. | |
|
473 | This is needed for handling changes to new files previously grafted into | |
|
474 | renamed directories. | |
|
475 | ||
|
476 | 464 | This function calls different copytracing algorithms based on config. |
|
477 | 465 | """ |
|
478 | 466 | # avoid silly behavior for update from empty dir |
|
479 | 467 | if not c1 or not c2 or c1 == c2: |
|
480 | return {}, {}, {}, {}, {} | |
|
468 | return branch_copies(), branch_copies(), {} | |
|
481 | 469 | |
|
482 | 470 | narrowmatch = c1.repo().narrowmatch() |
|
483 | 471 | |
|
484 | 472 | # avoid silly behavior for parent -> working dir |
|
485 | 473 | if c2.node() is None and c1.node() == repo.dirstate.p1(): |
|
486 | return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {} | |
|
474 | return ( | |
|
475 | branch_copies(_dirstatecopies(repo, narrowmatch)), | |
|
476 | branch_copies(), | |
|
477 | {}, | |
|
478 | ) | |
|
487 | 479 | |
|
488 | 480 | copytracing = repo.ui.config(b'experimental', b'copytrace') |
|
489 | 481 | if stringutil.parsebool(copytracing) is False: |
|
490 | 482 | # stringutil.parsebool() returns None when it is unable to parse the |
|
491 | 483 | # value, so we should rely on making sure copytracing is on such cases |
|
492 | return {}, {}, {}, {}, {} | |
|
484 | return branch_copies(), branch_copies(), {} | |
|
493 | 485 | |
|
494 | 486 | if usechangesetcentricalgo(repo): |
|
495 | 487 | # The heuristics don't make sense when we need changeset-centric algos |
@@ -537,17 +529,47 b' def _checksinglesidecopies(' | |||
|
537 | 529 | if src not in m1: |
|
538 | 530 | # renamed on side 1, deleted on side 2 |
|
539 | 531 | renamedelete[src] = dsts1 |
|
532 | elif src not in mb: | |
|
533 | # Work around the "short-circuit to avoid issues with merge states" | |
|
534 | # thing in pathcopies(): pathcopies(x, y) can return a copy where the | |
|
535 | # destination doesn't exist in y. | |
|
536 | pass | |
|
540 | 537 | elif m2[src] != mb[src]: |
|
541 | 538 | if not _related(c2[src], base[src]): |
|
542 | 539 | return |
|
543 | 540 | # modified on side 2 |
|
544 | 541 | for dst in dsts1: |
|
545 | if dst not in m2: | |
|
546 | # dst not added on side 2 (handle as regular | |
|
547 | # "both created" case in manifestmerge otherwise) | |
|
548 | 542 | copy[dst] = src |
|
549 | 543 | |
|
550 | 544 | |
|
545 | class branch_copies(object): | |
|
546 | """Information about copies made on one side of a merge/graft. | |
|
547 | ||
|
548 | "copy" is a mapping from destination name -> source name, | |
|
549 | where source is in c1 and destination is in c2 or vice-versa. | |
|
550 | ||
|
551 | "movewithdir" is a mapping from source name -> destination name, | |
|
552 | where the file at source present in one context but not the other | |
|
553 | needs to be moved to destination by the merge process, because the | |
|
554 | other context moved the directory it is in. | |
|
555 | ||
|
556 | "renamedelete" is a mapping of source name -> list of destination | |
|
557 | names for files deleted in c1 that were renamed in c2 or vice-versa. | |
|
558 | ||
|
559 | "dirmove" is a mapping of detected source dir -> destination dir renames. | |
|
560 | This is needed for handling changes to new files previously grafted into | |
|
561 | renamed directories. | |
|
562 | """ | |
|
563 | ||
|
564 | def __init__( | |
|
565 | self, copy=None, renamedelete=None, dirmove=None, movewithdir=None | |
|
566 | ): | |
|
567 | self.copy = {} if copy is None else copy | |
|
568 | self.renamedelete = {} if renamedelete is None else renamedelete | |
|
569 | self.dirmove = {} if dirmove is None else dirmove | |
|
570 | self.movewithdir = {} if movewithdir is None else movewithdir | |
|
571 | ||
|
572 | ||
|
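With the class in place, mergecopies() returns one branch_copies per side plus the shared diverge dict, as the rewritten return statements below show. A call-site sketch (repo, c1, c2, base are assumed; the variable names are illustrative):

    from mercurial import copies as copiesmod

    local, remote, diverge = copiesmod.mergecopies(repo, c1, c2, base)
    renamed_locally = local.copy       # dst -> src on the c1 side
    dir_moves_remote = remote.dirmove  # directory renames detected on c2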
551 | 573 | def _fullcopytracing(repo, c1, c2, base): |
|
552 | 574 | """ The full copytracing algorithm which finds all the new files that were |
|
553 | 575 | added from merge base up to the top commit and for each file it checks if |
@@ -563,6 +585,9 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
563 | 585 | copies1 = pathcopies(base, c1) |
|
564 | 586 | copies2 = pathcopies(base, c2) |
|
565 | 587 | |
|
588 | if not (copies1 or copies2): | |
|
589 | return branch_copies(), branch_copies(), {} | |
|
590 | ||
|
566 | 591 | inversecopies1 = {} |
|
567 | 592 | inversecopies2 = {} |
|
568 | 593 | for dst, src in copies1.items(): |
@@ -570,9 +595,11 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
570 | 595 | for dst, src in copies2.items(): |
|
571 | 596 | inversecopies2.setdefault(src, []).append(dst) |
|
572 | 597 | |
|
573 | copy = {} | |
|
598 | copy1 = {} | |
|
599 | copy2 = {} | |
|
574 | 600 | diverge = {} |
|
575 | renamedelete = {} | |
|
601 | renamedelete1 = {} | |
|
602 | renamedelete2 = {} | |
|
576 | 603 | allsources = set(inversecopies1) | set(inversecopies2) |
|
577 | 604 | for src in allsources: |
|
578 | 605 | dsts1 = inversecopies1.get(src) |
@@ -589,7 +616,8 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
589 | 616 | # and 'd' and deletes 'a'. |
|
590 | 617 | if dsts1 & dsts2: |
|
591 | 618 | for dst in dsts1 & dsts2: |
|
592 | copy[dst] = src | |
|
619 | copy1[dst] = src | |
|
620 | copy2[dst] = src | |
|
593 | 621 | else: |
|
594 | 622 | diverge[src] = sorted(dsts1 | dsts2) |
|
595 | 623 | elif src in m1 and src in m2: |
@@ -597,27 +625,21 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
597 | 625 | dsts1 = set(dsts1) |
|
598 | 626 | dsts2 = set(dsts2) |
|
599 | 627 | for dst in dsts1 & dsts2: |
|
600 | copy[dst] = src | |
|
628 | copy1[dst] = src | |
|
629 | copy2[dst] = src | |
|
601 | 630 | # TODO: Handle cases where it was renamed on one side and copied |
|
602 | 631 | # on the other side |
|
603 | 632 | elif dsts1: |
|
604 | 633 | # copied/renamed only on side 1 |
|
605 | 634 | _checksinglesidecopies( |
|
606 | src, dsts1, m1, m2, mb, c2, base, copy, renamedelete | |
|
635 | src, dsts1, m1, m2, mb, c2, base, copy1, renamedelete1 | |
|
607 | 636 | ) |
|
608 | 637 | elif dsts2: |
|
609 | 638 | # copied/renamed only on side 2 |
|
610 | 639 | _checksinglesidecopies( |
|
611 | src, dsts2, m2, m1, mb, c1, base, copy, renamedelete | |
|
640 | src, dsts2, m2, m1, mb, c1, base, copy2, renamedelete2 | |
|
612 | 641 | ) |
|
613 | 642 | |
|
614 | renamedeleteset = set() | |
|
615 | divergeset = set() | |
|
616 | for dsts in diverge.values(): | |
|
617 | divergeset.update(dsts) | |
|
618 | for dsts in renamedelete.values(): | |
|
619 | renamedeleteset.update(dsts) | |
|
620 | ||
|
621 | 643 | # find interesting file sets from manifests |
|
622 | 644 | addedinm1 = m1.filesnotin(mb, repo.narrowmatch()) |
|
623 | 645 | addedinm2 = m2.filesnotin(mb, repo.narrowmatch()) |
@@ -630,33 +652,60 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
630 | 652 | if u2: |
|
631 | 653 | repo.ui.debug(b"%s:\n %s\n" % (header % b'other', b"\n ".join(u2))) |
|
632 | 654 | |
|
633 | fullcopy = copies1.copy() | |
|
634 | fullcopy.update(copies2) | |
|
635 | if not fullcopy: | |
|
636 | return copy, {}, diverge, renamedelete, {} | |
|
655 | if repo.ui.debugflag: | |
|
656 | renamedeleteset = set() | |
|
657 | divergeset = set() | |
|
658 | for dsts in diverge.values(): | |
|
659 | divergeset.update(dsts) | |
|
660 | for dsts in renamedelete1.values(): | |
|
661 | renamedeleteset.update(dsts) | |
|
662 | for dsts in renamedelete2.values(): | |
|
663 | renamedeleteset.update(dsts) | |
|
637 | 664 | |
|
638 | if repo.ui.debugflag: | |
|
639 | 665 | repo.ui.debug( |
|
640 | 666 | b" all copies found (* = to merge, ! = divergent, " |
|
641 | 667 | b"% = renamed and deleted):\n" |
|
642 | 668 | ) |
|
643 | for f in sorted(fullcopy): | |
|
669 | for side, copies in ((b"local", copies1), (b"remote", copies2)): | |
|
670 | if not copies: | |
|
671 | continue | |
|
672 | repo.ui.debug(b" on %s side:\n" % side) | |
|
673 | for f in sorted(copies): | |
|
644 | 674 | note = b"" |
|
645 | if f in copy: | |
|
675 | if f in copy1 or f in copy2: | |
|
646 | 676 | note += b"*" |
|
647 | 677 | if f in divergeset: |
|
648 | 678 | note += b"!" |
|
649 | 679 | if f in renamedeleteset: |
|
650 | 680 | note += b"%" |
|
651 | 681 | repo.ui.debug( |
|
652 | b" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, note) | |
|
682 | b" src: '%s' -> dst: '%s' %s\n" % (copies[f], f, note) | |
|
653 | 683 | ) |
|
684 | del renamedeleteset | |
|
654 | 685 | del divergeset |
|
655 | 686 | |
|
656 | 687 | repo.ui.debug(b" checking for directory renames\n") |
|
657 | 688 | |
|
689 | dirmove1, movewithdir2 = _dir_renames(repo, c1, copy1, copies1, u2) | |
|
690 | dirmove2, movewithdir1 = _dir_renames(repo, c2, copy2, copies2, u1) | |
|
691 | ||
|
692 | branch_copies1 = branch_copies(copy1, renamedelete1, dirmove1, movewithdir1) | |
|
693 | branch_copies2 = branch_copies(copy2, renamedelete2, dirmove2, movewithdir2) | |
|
694 | ||
|
695 | return branch_copies1, branch_copies2, diverge | |
|
696 | ||
|
697 | ||
|
698 | def _dir_renames(repo, ctx, copy, fullcopy, addedfiles): | |
|
699 | """Finds moved directories and files that should move with them. | |
|
700 | ||
|
701 | ctx: the context for one of the sides | |
|
702 | copy: files copied on the same side (as ctx) | |
|
703 | fullcopy: files copied on the same side (as ctx), including those that | |
|
704 | merge.manifestmerge() won't care about | |
|
705 | addedfiles: added files on the other side (compared to ctx) | |
|
706 | """ | |
|
658 | 707 | # generate a directory move map |
|
659 | d1, d2 = c1.dirs(), c2.dirs() | |
|
708 | d = ctx.dirs() | |
|
660 | 709 | invalid = set() |
|
661 | 710 | dirmove = {} |
|
662 | 711 | |
@@ -667,12 +716,9 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
667 | 716 | if dsrc in invalid: |
|
668 | 717 | # already seen to be uninteresting |
|
669 | 718 | continue |
|
670 | elif dsrc in d1 and ddst in d1: | |
|
719 | elif dsrc in d and ddst in d: | |
|
671 | 720 | # directory wasn't entirely moved locally |
|
672 | 721 | invalid.add(dsrc) |
|
673 | elif dsrc in d2 and ddst in d2: | |
|
674 | # directory wasn't entirely moved remotely | |
|
675 | invalid.add(dsrc) | |
|
676 | 722 | elif dsrc in dirmove and dirmove[dsrc] != ddst: |
|
677 | 723 | # files from the same directory moved to two different places |
|
678 | 724 | invalid.add(dsrc) |
@@ -683,10 +729,10 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
683 | 729 | for i in invalid: |
|
684 | 730 | if i in dirmove: |
|
685 | 731 | del dirmove[i] |
|
686 | del d1, d2, invalid | |
|
732 | del d, invalid | |
|
687 | 733 | |
|
688 | 734 | if not dirmove: |
|
689 | return copy, {}, diverge, renamedelete, {} | |
|
735 | return {}, {} | |
|
690 | 736 | |
|
691 | 737 | dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)} |
|
692 | 738 | |
@@ -697,7 +743,7 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
697 | 743 | |
|
698 | 744 | movewithdir = {} |
|
699 | 745 | # check unaccounted nonoverlapping files against directory moves |
|
700 | for f in u1 + u2: | |
|
746 | for f in addedfiles: | |
|
701 | 747 | if f not in fullcopy: |
|
702 | 748 | for d in dirmove: |
|
703 | 749 | if f.startswith(d): |
@@ -711,7 +757,7 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
711 | 757 | ) |
|
712 | 758 | break |
|
713 | 759 | |
|
714 | return copy, movewithdir, diverge, renamedelete, dirmove | |
|
760 | return dirmove, movewithdir | |
|
715 | 761 | |
|
716 | 762 | |
|
717 | 763 | def _heuristicscopytracing(repo, c1, c2, base): |
@@ -744,8 +790,6 b' def _heuristicscopytracing(repo, c1, c2,' | |||
|
744 | 790 | if c2.rev() is None: |
|
745 | 791 | c2 = c2.p1() |
|
746 | 792 | |
|
747 | copies = {} | |
|
748 | ||
|
749 | 793 | changedfiles = set() |
|
750 | 794 | m1 = c1.manifest() |
|
751 | 795 | if not repo.revs(b'%d::%d', base.rev(), c2.rev()): |
@@ -765,10 +809,11 b' def _heuristicscopytracing(repo, c1, c2,' | |||
|
765 | 809 | changedfiles.update(ctx.files()) |
|
766 | 810 | ctx = ctx.p1() |
|
767 | 811 | |
|
812 | copies2 = {} | |
|
768 | 813 | cp = _forwardcopies(base, c2) |
|
769 | 814 | for dst, src in pycompat.iteritems(cp): |
|
770 | 815 | if src in m1: |
|
771 | copies[dst] = src | |
|
816 | copies2[dst] = src | |
|
772 | 817 | |
|
773 | 818 | # file is missing if it isn't present in the destination, but is present in |
|
774 | 819 | # the base and present in the source. |
@@ -777,6 +822,7 b' def _heuristicscopytracing(repo, c1, c2,' | |||
|
777 | 822 | filt = lambda f: f not in m1 and f in base and f in c2 |
|
778 | 823 | missingfiles = [f for f in changedfiles if filt(f)] |
|
779 | 824 | |
|
825 | copies1 = {} | |
|
780 | 826 | if missingfiles: |
|
781 | 827 | basenametofilename = collections.defaultdict(list) |
|
782 | 828 | dirnametofilename = collections.defaultdict(list) |
@@ -818,9 +864,9 b' def _heuristicscopytracing(repo, c1, c2,' | |||
|
818 | 864 | # if there are a few related copies then we'll merge |
|
819 | 865 | # changes into all of them. This matches the behaviour |
|
820 | 866 | # of upstream copytracing |
|
821 | copies[candidate] = f | |
|
867 | copies1[candidate] = f | |
|
822 | 868 | |
|
823 | return copies, {}, {}, {}, {} | |
|
869 | return branch_copies(copies1), branch_copies(copies2), {} | |
|
824 | 870 | |
|
825 | 871 | |
|
826 | 872 | def _related(f1, f2): |
@@ -63,13 +63,13 b' try:' | |||
|
63 | 63 | import curses.ascii |
|
64 | 64 | |
|
65 | 65 | curses.error |
|
66 | except ImportError: | |
|
66 | except (ImportError, AttributeError): | |
|
67 | 67 | # I have no idea if wcurses works with crecord... |
|
68 | 68 | try: |
|
69 | 69 | import wcurses as curses |
|
70 | 70 | |
|
71 | 71 | curses.error |
|
72 | except ImportError: | |
|
72 | except (ImportError, AttributeError): | |
|
73 | 73 | # wcurses is not shipped on Windows by default, or python is not |
|
74 | 74 | # compiled with curses |
|
75 | 75 | curses = False |
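The widened except clauses cover stand-in modules that import cleanly but lack attributes a real curses build provides, which surfaces as AttributeError rather than ImportError. The guard pattern in isolation:

    try:
        import curses

        curses.error  # probe an attribute every usable build must have
    except (ImportError, AttributeError):
        curses = None  # treat a stub or broken module as 'no curses'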
@@ -274,6 +274,247 b' def descendantrevs(revs, revsfn, parentr' | |||
|
274 | 274 | break |
|
275 | 275 | |
|
276 | 276 | |
|
277 | class subsetparentswalker(object): | |
|
278 | r"""Scan adjacent ancestors in the graph given by the subset | |
|
279 | ||
|
280 | This computes parent-child relations in the sub graph filtered by | |
|
281 | a revset. Primary use case is to draw a revisions graph. | |
|
282 | ||
|
283 | In the following example, we consider that the node 'f' has edges to all | |
|
284 | ancestor nodes, but redundant paths are eliminated. The edge 'f'->'b' | |
|
285 | is eliminated because there is a path 'f'->'c'->'b' for example. | |
|
286 | ||
|
287 | - d - e - | |
|
288 | / \ | |
|
289 | a - b - c - f | |
|
290 | ||
|
291 | If the node 'c' is filtered out, the edge 'f'->'b' is activated. | |
|
292 | ||
|
293 | - d - e - | |
|
294 | / \ | |
|
295 | a - b -(c)- f | |
|
296 | ||
|
297 | Likewise, if 'd' and 'e' are filtered out, this edge is fully eliminated | |
|
298 | since there is a path 'f'->'c'->'b'->'a' for 'f'->'a'. | |
|
299 | ||
|
300 | (d) (e) | |
|
301 | ||
|
302 | a - b - c - f | |
|
303 | ||
|
304 | Implementation-wise, 'f' is passed down to 'a' as unresolved through the | |
|
305 | 'f'->'e'->'d'->'a' path, whereas we do also remember that 'f' has already | |
|
306 | been resolved while walking down the 'f'->'c'->'b'->'a' path. When | |
|
307 | processing the node 'a', the unresolved 'f'->'a' path is eliminated as | |
|
308 | the 'f' end is marked as resolved. | |
|
309 | ||
|
310 | Ancestors are searched from the tipmost revision in the subset so the | |
|
311 | results can be cached. You should specify startrev to narrow the search | |
|
312 | space to ':startrev'. | |
|
313 | """ | |
|
314 | ||
|
315 | def __init__(self, repo, subset, startrev=None): | |
|
316 | if startrev is not None: | |
|
317 | subset = repo.revs(b'%d:null', startrev) & subset | |
|
318 | ||
|
319 | # equivalent to 'subset = subset.sorted(reverse=True)', but there's | |
|
320 | # no such function. | |
|
321 | fastdesc = subset.fastdesc | |
|
322 | if fastdesc: | |
|
323 | desciter = fastdesc() | |
|
324 | else: | |
|
325 | if not subset.isdescending() and not subset.istopo(): | |
|
326 | subset = smartset.baseset(subset) | |
|
327 | subset.sort(reverse=True) | |
|
328 | desciter = iter(subset) | |
|
329 | ||
|
330 | self._repo = repo | |
|
331 | self._changelog = repo.changelog | |
|
332 | self._subset = subset | |
|
333 | ||
|
334 | # scanning state (see _scanparents): | |
|
335 | self._tovisit = [] | |
|
336 | self._pendingcnt = {} | |
|
337 | self._pointers = {} | |
|
338 | self._parents = {} | |
|
339 | self._inputhead = nullrev # reassigned by self._advanceinput() | |
|
340 | self._inputtail = desciter | |
|
341 | self._bottomrev = nullrev | |
|
342 | self._advanceinput() | |
|
343 | ||
|
344 | def parentsset(self, rev): | |
|
345 | """Look up parents of the given revision in the subset, and returns | |
|
346 | as a smartset""" | |
|
347 | return smartset.baseset(self.parents(rev)) | |
|
348 | ||
|
349 | def parents(self, rev): | |
|
350 | """Look up parents of the given revision in the subset | |
|
351 | ||
|
352 | The returned revisions are sorted by parent index (p1/p2). | |
|
353 | """ | |
|
354 | self._scanparents(rev) | |
|
355 | return [r for _c, r in sorted(self._parents.get(rev, []))] | |
|
356 | ||
|
357 | def _parentrevs(self, rev): | |
|
358 | try: | |
|
359 | revs = self._changelog.parentrevs(rev) | |
|
360 | if revs[-1] == nullrev: | |
|
361 | return revs[:-1] | |
|
362 | return revs | |
|
363 | except error.WdirUnsupported: | |
|
364 | return tuple(pctx.rev() for pctx in self._repo[None].parents()) | |
|
365 | ||
|
366 | def _advanceinput(self): | |
|
367 | """Advance the input iterator and set the next revision to _inputhead""" | |
|
368 | if self._inputhead < nullrev: | |
|
369 | return | |
|
370 | try: | |
|
371 | self._inputhead = next(self._inputtail) | |
|
372 | except StopIteration: | |
|
373 | self._bottomrev = self._inputhead | |
|
374 | self._inputhead = nullrev - 1 | |
|
375 | ||
|
376 | def _scanparents(self, stoprev): | |
|
377 | """Scan ancestors until the parents of the specified stoprev are | |
|
378 | resolved""" | |
|
379 | ||
|
380 | # 'tovisit' is the queue of the input revisions and their ancestors. | |
|
381 | # It will be populated incrementally to minimize the initial cost | |
|
382 | # of computing the given subset. | |
|
383 | # | |
|
384 | # For to-visit revisions, we keep track of | |
|
385 | # - the number of the unresolved paths: pendingcnt[rev], | |
|
386 | # - dict of the unresolved descendants and chains: pointers[rev][0], | |
|
387 | # - set of the already resolved descendants: pointers[rev][1]. | |
|
388 | # | |
|
389 | # When a revision is visited, 'pointers[rev]' should be popped and | |
|
390 | # propagated to its parents accordingly. | |
|
391 | # | |
|
392 | # Once all pending paths have been resolved, 'pendingcnt[rev]' becomes | |
|
393 | # 0 and 'parents[rev]' contains the unsorted list of parent revisions | |
|
394 | # and p1/p2 chains (excluding linear paths.) The p1/p2 chains will be | |
|
395 | # used as a sort key preferring p1. 'len(chain)' should be the number | |
|
396 | # of merges between two revisions. | |
|
397 | ||
|
398 | subset = self._subset | |
|
399 | tovisit = self._tovisit # heap queue of [-rev] | |
|
400 | pendingcnt = self._pendingcnt # {rev: count} for visited revisions | |
|
401 | pointers = self._pointers # {rev: [{unresolved_rev: chain}, resolved]} | |
|
402 | parents = self._parents # {rev: [(chain, rev)]} | |
|
403 | ||
|
404 | while tovisit or self._inputhead >= nullrev: | |
|
405 | if pendingcnt.get(stoprev) == 0: | |
|
406 | return | |
|
407 | ||
|
408 | # feed greater revisions from input set to queue | |
|
409 | if not tovisit: | |
|
410 | heapq.heappush(tovisit, -self._inputhead) | |
|
411 | self._advanceinput() | |
|
412 | while self._inputhead >= -tovisit[0]: | |
|
413 | heapq.heappush(tovisit, -self._inputhead) | |
|
414 | self._advanceinput() | |
|
415 | ||
|
416 | rev = -heapq.heappop(tovisit) | |
|
417 | if rev < self._bottomrev: | |
|
418 | return | |
|
419 | if rev in pendingcnt and rev not in pointers: | |
|
420 | continue # already visited | |
|
421 | ||
|
422 | curactive = rev in subset | |
|
423 | pendingcnt.setdefault(rev, 0) # mark as visited | |
|
424 | if curactive: | |
|
425 | assert rev not in parents | |
|
426 | parents[rev] = [] | |
|
427 | unresolved, resolved = pointers.pop(rev, ({}, set())) | |
|
428 | ||
|
429 | if curactive: | |
|
430 | # reached to active rev, resolve pending descendants' parents | |
|
431 | for r, c in unresolved.items(): | |
|
432 | pendingcnt[r] -= 1 | |
|
433 | assert pendingcnt[r] >= 0 | |
|
434 | if r in resolved: | |
|
435 | continue # eliminate redundant path | |
|
436 | parents[r].append((c, rev)) | |
|
437 | # mark the descendant 'r' as resolved through this path if | |
|
438 | # there are still pending pointers. the 'resolved' set may | |
|
439 | # be concatenated later at a fork revision. | |
|
440 | if pendingcnt[r] > 0: | |
|
441 | resolved.add(r) | |
|
442 | unresolved.clear() | |
|
443 | # occasionally clean resolved markers. otherwise the set | |
|
444 | # would grow indefinitely. | |
|
445 | resolved = {r for r in resolved if pendingcnt[r] > 0} | |
|
446 | ||
|
447 | parentrevs = self._parentrevs(rev) | |
|
448 | bothparentsactive = all(p in subset for p in parentrevs) | |
|
449 | ||
|
450 | # set up or propagate tracking pointers if | |
|
451 | # - one of the parents is not active, | |
|
452 | # - or descendants' parents are unresolved. | |
|
453 | if not bothparentsactive or unresolved or resolved: | |
|
454 | if len(parentrevs) <= 1: | |
|
455 | # can avoid copying the tracking pointer | |
|
456 | parentpointers = [(unresolved, resolved)] | |
|
457 | else: | |
|
458 | parentpointers = [ | |
|
459 | (unresolved, resolved), | |
|
460 | (unresolved.copy(), resolved.copy()), | |
|
461 | ] | |
|
462 | # 'rev' is a merge revision. increment the pending count | |
|
463 | # as the 'unresolved' dict will be duplicated, and append | |
|
464 | # p1/p2 code to the existing chains. | |
|
465 | for r in unresolved: | |
|
466 | pendingcnt[r] += 1 | |
|
467 | parentpointers[0][0][r] += b'1' | |
|
468 | parentpointers[1][0][r] += b'2' | |
|
469 | for i, p in enumerate(parentrevs): | |
|
470 | assert p < rev | |
|
471 | heapq.heappush(tovisit, -p) | |
|
472 | if p in pointers: | |
|
473 | # 'p' is a fork revision. concatenate tracking pointers | |
|
474 | # and decrement the pending count accordingly. | |
|
475 | knownunresolved, knownresolved = pointers[p] | |
|
476 | unresolved, resolved = parentpointers[i] | |
|
477 | for r, c in unresolved.items(): | |
|
478 | if r in knownunresolved: | |
|
479 | # unresolved at both paths | |
|
480 | pendingcnt[r] -= 1 | |
|
481 | assert pendingcnt[r] > 0 | |
|
482 | # take shorter chain | |
|
483 | knownunresolved[r] = min(c, knownunresolved[r]) | |
|
484 | else: | |
|
485 | knownunresolved[r] = c | |
|
486 | # simply propagate the 'resolved' set as deduplicating | |
|
487 | # 'unresolved' here would be slightly complicated. | |
|
488 | knownresolved.update(resolved) | |
|
489 | else: | |
|
490 | pointers[p] = parentpointers[i] | |
|
491 | ||
|
492 | # then, populate the active parents directly and add the current | |
|
493 | # 'rev' to the tracking pointers of the inactive parents. | |
|
494 | # 'pointers[p]' may be optimized out if both parents are active. | |
|
495 | chaincodes = [b''] if len(parentrevs) <= 1 else [b'1', b'2'] | |
|
496 | if curactive and bothparentsactive: | |
|
497 | for i, p in enumerate(parentrevs): | |
|
498 | c = chaincodes[i] | |
|
499 | parents[rev].append((c, p)) | |
|
500 | # no need to mark 'rev' as resolved since the 'rev' should | |
|
501 | # be fully resolved (i.e. pendingcnt[rev] == 0) | |
|
502 | assert pendingcnt[rev] == 0 | |
|
503 | elif curactive: | |
|
504 | for i, p in enumerate(parentrevs): | |
|
505 | unresolved, resolved = pointers[p] | |
|
506 | assert rev not in unresolved | |
|
507 | c = chaincodes[i] | |
|
508 | if p in subset: | |
|
509 | parents[rev].append((c, p)) | |
|
510 | # mark 'rev' as resolved through this path | |
|
511 | resolved.add(rev) | |
|
512 | else: | |
|
513 | pendingcnt[rev] += 1 | |
|
514 | unresolved[rev] = c | |
|
515 | assert 0 < pendingcnt[rev] <= 2 | |
|
516 | ||
|
517 | ||
|
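A minimal usage sketch for the new walker (assumes a repo object; the revset is illustrative):

    from mercurial import dagop

    subset = repo.revs(b'branch(default)')
    walker = dagop.subsetparentswalker(repo, subset)
    for rev in subset:
        parents = walker.parents(rev)  # parents of rev *within* the subset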
277 | 518 | def _reachablerootspure(pfunc, minroot, roots, heads, includepath): |
|
278 | 519 | """See revlog.reachableroots""" |
|
279 | 520 | if not roots: |
@@ -11,8 +11,10 b' import codecs' | |||
|
11 | 11 | import collections |
|
12 | 12 | import difflib |
|
13 | 13 | import errno |
|
14 | import glob | |
|
14 | 15 | import operator |
|
15 | 16 | import os |
|
17 | import platform | |
|
16 | 18 | import random |
|
17 | 19 | import re |
|
18 | 20 | import socket |
@@ -27,7 +29,6 b' from .i18n import _' | |||
|
27 | 29 | from .node import ( |
|
28 | 30 | bin, |
|
29 | 31 | hex, |
|
30 | nullhex, | |
|
31 | 32 | nullid, |
|
32 | 33 | nullrev, |
|
33 | 34 | short, |
@@ -38,6 +39,7 b' from .pycompat import (' | |||
|
38 | 39 | ) |
|
39 | 40 | from . import ( |
|
40 | 41 | bundle2, |
|
42 | bundlerepo, | |
|
41 | 43 | changegroup, |
|
42 | 44 | cmdutil, |
|
43 | 45 | color, |
@@ -75,6 +77,7 b' from . import (' | |||
|
75 | 77 | sshpeer, |
|
76 | 78 | sslutil, |
|
77 | 79 | streamclone, |
|
80 | tags as tagsmod, | |
|
78 | 81 | templater, |
|
79 | 82 | treediscovery, |
|
80 | 83 | upgrade, |
@@ -93,7 +96,10 b' from .utils import (' | |||
|
93 | 96 | stringutil, |
|
94 | 97 | ) |
|
95 | 98 | |
|
96 | from .revlogutils import deltas as deltautil | |
|
99 | from .revlogutils import ( | |
|
100 | deltas as deltautil, | |
|
101 | nodemap, | |
|
102 | ) | |
|
97 | 103 | |
|
98 | 104 | release = lockmod.release |
|
99 | 105 | |
@@ -578,7 +584,7 b' def debugdag(ui, repo, file_=None, *revs' | |||
|
578 | 584 | dots = opts.get('dots') |
|
579 | 585 | if file_: |
|
580 | 586 | rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_) |
|
581 | revs = set((int(r) for r in revs)) | |
|
587 | revs = {int(r) for r in revs} | |
|
582 | 588 | |
|
583 | 589 | def events(): |
|
584 | 590 | for r in rlog: |
@@ -1128,7 +1134,7 b' def debugfileset(ui, repo, expr, **opts)' | |||
|
1128 | 1134 | (b'analyzed', filesetlang.analyze), |
|
1129 | 1135 | (b'optimized', filesetlang.optimize), |
|
1130 | 1136 | ] |
|
1131 | stagenames = set(n for n, f in stages) | |
|
1137 | stagenames = {n for n, f in stages} | |
|
1132 | 1138 | |
|
1133 | 1139 | showalways = set() |
|
1134 | 1140 | if ui.verbose and not opts[b'show_stage']: |
@@ -1487,6 +1493,11 b' def debuginstall(ui, **opts):' | |||
|
1487 | 1493 | pycompat.sysexecutable or _(b"unknown"), |
|
1488 | 1494 | ) |
|
1489 | 1495 | fm.write( |
|
1496 | b'pythonimplementation', | |
|
1497 | _(b"checking Python implementation (%s)\n"), | |
|
1498 | pycompat.sysbytes(platform.python_implementation()), | |
|
1499 | ) | |
|
1500 | fm.write( | |
|
1490 | 1501 | b'pythonver', |
|
1491 | 1502 | _(b"checking Python version (%s)\n"), |
|
1492 | 1503 | (b"%d.%d.%d" % sys.version_info[:3]), |
@@ -1497,6 +1508,13 b' def debuginstall(ui, **opts):' | |||
|
1497 | 1508 | pythonlib or _(b"unknown"), |
|
1498 | 1509 | ) |
|
1499 | 1510 | |
|
1511 | try: | |
|
1512 | from . import rustext | |
|
1513 | ||
|
1514 | rustext.__doc__ # trigger lazy import | |
|
1515 | except ImportError: | |
|
1516 | rustext = None | |
|
1517 | ||
|
1500 | 1518 | security = set(sslutil.supportedprotocols) |
|
1501 | 1519 | if sslutil.hassni: |
|
1502 | 1520 | security.add(b'sni') |
@@ -1524,6 +1542,13 b' def debuginstall(ui, **opts):' | |||
|
1524 | 1542 | ) |
|
1525 | 1543 | ) |
|
1526 | 1544 | |
|
1545 | fm.plain( | |
|
1546 | _( | |
|
1547 | b"checking Rust extensions (%s)\n" | |
|
1548 | % (b'missing' if rustext is None else b'installed') | |
|
1549 | ), | |
|
1550 | ) | |
|
1551 | ||
|
1527 | 1552 | # TODO print CA cert info |
|
1528 | 1553 | |
|
1529 | 1554 | # hg version |
@@ -1625,6 +1650,13 b' def debuginstall(ui, **opts):' | |||
|
1625 | 1650 | fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2) |
|
1626 | 1651 | fm.data(re2=bool(util._re2)) |
|
1627 | 1652 | |
|
1653 | rust_debug_mod = policy.importrust("debug") | |
|
1654 | if rust_debug_mod is not None: | |
|
1655 | re2_rust = b'installed' if rust_debug_mod.re2_installed else b'missing' | |
|
1656 | ||
|
1657 | msg = b'checking "re2" regexp engine Rust bindings (%s)\n' | |
|
1658 | fm.plain(_(msg % re2_rust)) | |
|
1659 | ||
|
1628 | 1660 | # templates |
|
1629 | 1661 | p = templater.templatepaths() |
|
1630 | 1662 | fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p)) |
@@ -1934,120 +1966,100 b' def debugmanifestfulltextcache(ui, repo,' | |||
|
1934 | 1966 | ) |
|
1935 | 1967 | |
|
1936 | 1968 | |
|
1937 | @command(b'debugmergestate', [], b'') | |
|
1938 | def debugmergestate(ui, repo, *args): | |
|
1969 | @command(b'debugmergestate', [] + cmdutil.templateopts, b'') | |
|
1970 | def debugmergestate(ui, repo, *args, **opts): | |
|
1939 | 1971 | """print merge state |
|
1940 | 1972 | |
|
1941 | 1973 | Use --verbose to print out information about whether v1 or v2 merge state |
|
1942 | 1974 | was chosen.""" |
|
1943 | 1975 | |
|
1944 | def _hashornull(h): | |
|
1945 | if h == nullhex: | |
|
1946 | return b'null' | |
|
1947 | else: | |
|
1948 | return h | |
|
1949 | ||
|
1950 | def printrecords(version): | |
|
1951 | ui.writenoi18n(b'* version %d records\n' % version) | |
|
1952 | if version == 1: | |
|
1953 | records = v1records | |
|
1954 | else: | |
|
1955 | records = v2records | |
|
1956 | ||
|
1957 | for rtype, record in records: | |
|
1958 | # pretty print some record types | |
|
1959 | if rtype == b'L': | |
|
1960 | ui.writenoi18n(b'local: %s\n' % record) | |
|
1961 | elif rtype == b'O': | |
|
1962 | ui.writenoi18n(b'other: %s\n' % record) | |
|
1963 | elif rtype == b'm': | |
|
1964 | driver, mdstate = record.split(b'\0', 1) | |
|
1965 | ui.writenoi18n( | |
|
1966 | b'merge driver: %s (state "%s")\n' % (driver, mdstate) | |
|
1967 | ) | |
|
1968 | elif rtype in b'FDC': | |
|
1969 | r = record.split(b'\0') | |
|
1970 | f, state, hash, lfile, afile, anode, ofile = r[0:7] | |
|
1971 | if version == 1: | |
|
1972 | onode = b'not stored in v1 format' | |
|
1973 | flags = r[7] | |
|
1974 | else: | |
|
1975 | onode, flags = r[7:9] | |
|
1976 | ui.writenoi18n( | |
|
1977 | b'file: %s (record type "%s", state "%s", hash %s)\n' | |
|
1978 | % (f, rtype, state, _hashornull(hash)) | |
|
1979 | ) | |
|
1980 | ui.writenoi18n( | |
|
1981 | b' local path: %s (flags "%s")\n' % (lfile, flags) | |
|
1982 | ) | |
|
1983 | ui.writenoi18n( | |
|
1984 | b' ancestor path: %s (node %s)\n' | |
|
1985 | % (afile, _hashornull(anode)) | |
|
1986 | ) | |
|
1987 | ui.writenoi18n( | |
|
1988 | b' other path: %s (node %s)\n' | |
|
1989 | % (ofile, _hashornull(onode)) | |
|
1990 | ) | |
|
1991 | elif rtype == b'f': | |
|
1992 | filename, rawextras = record.split(b'\0', 1) | |
|
1993 | extras = rawextras.split(b'\0') | |
|
1994 | i = 0 | |
|
1995 | extrastrings = [] | |
|
1996 | while i < len(extras): | |
|
1997 | extrastrings.append(b'%s = %s' % (extras[i], extras[i + 1])) | |
|
1998 | i += 2 | |
|
1999 | ||
|
2000 | ui.writenoi18n( | |
|
2001 | b'file extras: %s (%s)\n' | |
|
2002 | % (filename, b', '.join(extrastrings)) | |
|
2003 | ) | |
|
2004 | elif rtype == b'l': | |
|
2005 | labels = record.split(b'\0', 2) | |
|
2006 | labels = [l for l in labels if len(l) > 0] | |
|
2007 | ui.writenoi18n(b'labels:\n') | |
|
2008 | ui.write((b' local: %s\n' % labels[0])) | |
|
2009 | ui.write((b' other: %s\n' % labels[1])) | |
|
2010 | if len(labels) > 2: | |
|
2011 | ui.write((b' base: %s\n' % labels[2])) | |
|
2012 | else: | |
|
2013 | ui.writenoi18n( | |
|
2014 | b'unrecognized entry: %s\t%s\n' | |
|
2015 | % (rtype, record.replace(b'\0', b'\t')) | |
|
2016 | ) | |
|
2017 | ||
|
2018 | # Avoid mergestate.read() since it may raise an exception for unsupported | |
|
2019 | # merge state records. We shouldn't be doing this, but this is OK since this | |
|
2020 | # command is pretty low-level. | |
|
1976 | if ui.verbose: | |
|
2021 | 1977 | ms = mergemod.mergestate(repo) |
|
2022 | 1978 | |
|
2023 | 1979 | # sort so that reasonable information is on top |
|
2024 | 1980 | v1records = ms._readrecordsv1() |
|
2025 | 1981 | v2records = ms._readrecordsv2() |
|
2026 | order = b'LOml' | |
|
2027 | ||
|
2028 | def key(r): | |
|
2029 | idx = order.find(r[0]) | |
|
2030 | if idx == -1: | |
|
2031 | return (1, r[1]) | |
|
2032 | else: | |
|
2033 | return (0, idx) | |
|
2034 | ||
|
2035 | v1records.sort(key=key) | |
|
2036 | v2records.sort(key=key) | |
|
2037 | 1982 | |
|
2038 | 1983 | if not v1records and not v2records: |
|
2039 | ui.writenoi18n(b'no merge state found\n') | |
|
1984 | pass | |
|
2040 | 1985 | elif not v2records: |
|
2041 | ui.notenoi18n(b'no version 2 merge state\n') | |
|
2042 | printrecords(1) | |
|
1986 | ui.writenoi18n(b'no version 2 merge state\n') | |
|
2043 | 1987 | elif ms._v1v2match(v1records, v2records): |
|
2044 | ui.notenoi18n(b'v1 and v2 states match: using v2\n') | |
|
2045 | printrecords(2) | |
|
1988 | ui.writenoi18n(b'v1 and v2 states match: using v2\n') | |
|
2046 | 1989 | else: |
|
2047 | ui.notenoi18n(b'v1 and v2 states mismatch: using v1\n') | |
|
2048 | printrecords(1) | |
|
2049 | if ui.verbose: | |
|
2050 | printrecords(2) | |
|
1990 | ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n') | |
|
1991 | ||
|
1992 | opts = pycompat.byteskwargs(opts) | |
|
1993 | if not opts[b'template']: | |
|
1994 | opts[b'template'] = ( | |
|
1995 | b'{if(commits, "", "no merge state found\n")}' | |
|
1996 | b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}' | |
|
1997 | b'{files % "file: {path} (state \\"{state}\\")\n' | |
|
1998 | b'{if(local_path, "' | |
|
1999 | b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n' | |
|
2000 | b' ancestor path: {ancestor_path} (node {ancestor_node})\n' | |
|
2001 | b' other path: {other_path} (node {other_node})\n' | |
|
2002 | b'")}' | |
|
2003 | b'{if(rename_side, "' | |
|
2004 | b' rename side: {rename_side}\n' | |
|
2005 | b' renamed path: {renamed_path}\n' | |
|
2006 | b'")}' | |
|
2007 | b'{extras % " extra: {key} = {value}\n"}' | |
|
2008 | b'"}' | |
|
2009 | ) | |
|
2010 | ||
|
2011 | ms = mergemod.mergestate.read(repo) | |
|
2012 | ||
|
2013 | fm = ui.formatter(b'debugmergestate', opts) | |
|
2014 | fm.startitem() | |
|
2015 | ||
|
2016 | fm_commits = fm.nested(b'commits') | |
|
2017 | if ms.active(): | |
|
2018 | for name, node, label_index in ( | |
|
2019 | (b'local', ms.local, 0), | |
|
2020 | (b'other', ms.other, 1), | |
|
2021 | ): | |
|
2022 | fm_commits.startitem() | |
|
2023 | fm_commits.data(name=name) | |
|
2024 | fm_commits.data(node=hex(node)) | |
|
2025 | if ms._labels and len(ms._labels) > label_index: | |
|
2026 | fm_commits.data(label=ms._labels[label_index]) | |
|
2027 | fm_commits.end() | |
|
2028 | ||
|
2029 | fm_files = fm.nested(b'files') | |
|
2030 | if ms.active(): | |
|
2031 | for f in ms: | |
|
2032 | fm_files.startitem() | |
|
2033 | fm_files.data(path=f) | |
|
2034 | state = ms._state[f] | |
|
2035 | fm_files.data(state=state[0]) | |
|
2036 | if state[0] in ( | |
|
2037 | mergemod.MERGE_RECORD_UNRESOLVED, | |
|
2038 | mergemod.MERGE_RECORD_RESOLVED, | |
|
2039 | ): | |
|
2040 | fm_files.data(local_key=state[1]) | |
|
2041 | fm_files.data(local_path=state[2]) | |
|
2042 | fm_files.data(ancestor_path=state[3]) | |
|
2043 | fm_files.data(ancestor_node=state[4]) | |
|
2044 | fm_files.data(other_path=state[5]) | |
|
2045 | fm_files.data(other_node=state[6]) | |
|
2046 | fm_files.data(local_flags=state[7]) | |
|
2047 | elif state[0] in ( | |
|
2048 | mergemod.MERGE_RECORD_UNRESOLVED_PATH, | |
|
2049 | mergemod.MERGE_RECORD_RESOLVED_PATH, | |
|
2050 | ): | |
|
2051 | fm_files.data(renamed_path=state[1]) | |
|
2052 | fm_files.data(rename_side=state[2]) | |
|
2053 | fm_extras = fm_files.nested(b'extras') | |
|
2054 | for k, v in ms.extras(f).items(): | |
|
2055 | fm_extras.startitem() | |
|
2056 | fm_extras.data(key=k) | |
|
2057 | fm_extras.data(value=v) | |
|
2058 | fm_extras.end() | |
|
2059 | ||
|
2060 | fm_files.end() | |
|
2061 | ||
|
2062 | fm.end() | |
|
2051 | 2063 | |
|
2052 | 2064 | |
|
2053 | 2065 | @command(b'debugnamecomplete', [], _(b'NAME...')) |
@@ -2075,6 +2087,70 b' def debugnamecomplete(ui, repo, *args):' | |||
|
2075 | 2087 | |
|
2076 | 2088 | |
|
2077 | 2089 | @command( |
|
2090 | b'debugnodemap', | |
|
2091 | [ | |
|
2092 | ( | |
|
2093 | b'', | |
|
2094 | b'dump-new', | |
|
2095 | False, | |
|
2096 | _(b'write a (new) persistent binary nodemap to stdout'), | 
|
2097 | ), | |
|
2098 | (b'', b'dump-disk', False, _(b'dump on-disk data to stdout')), | 
|
2099 | ( | |
|
2100 | b'', | |
|
2101 | b'check', | |
|
2102 | False, | |
|
2103 | _(b'check that the on-disk data are correct.'), | 
|
2104 | ), | |
|
2105 | ( | |
|
2106 | b'', | |
|
2107 | b'metadata', | |
|
2108 | False, | |
|
2109 | _(b'display the on-disk metadata for the nodemap'), | 
|
2110 | ), | |
|
2111 | ], | |
|
2112 | ) | |
|
2113 | def debugnodemap(ui, repo, **opts): | |
|
2114 | """write and inspect on disk nodemap | |
|
2115 | """ | |
|
2116 | if opts['dump_new']: | |
|
2117 | unfi = repo.unfiltered() | |
|
2118 | cl = unfi.changelog | |
|
2119 | if util.safehasattr(cl.index, "nodemap_data_all"): | |
|
2120 | data = cl.index.nodemap_data_all() | |
|
2121 | else: | |
|
2122 | data = nodemap.persistent_data(cl.index) | |
|
2123 | ui.write(data) | |
|
2124 | elif opts['dump_disk']: | |
|
2125 | unfi = repo.unfiltered() | |
|
2126 | cl = unfi.changelog | |
|
2127 | nm_data = nodemap.persisted_data(cl) | |
|
2128 | if nm_data is not None: | |
|
2129 | docket, data = nm_data | |
|
2130 | ui.write(data[:]) | |
|
2131 | elif opts['check']: | |
|
2132 | unfi = repo.unfiltered() | |
|
2133 | cl = unfi.changelog | |
|
2134 | nm_data = nodemap.persisted_data(cl) | |
|
2135 | if nm_data is not None: | |
|
2136 | docket, data = nm_data | |
|
2137 | return nodemap.check_data(ui, cl.index, data) | |
|
2138 | elif opts['metadata']: | |
|
2139 | unfi = repo.unfiltered() | |
|
2140 | cl = unfi.changelog | |
|
2141 | nm_data = nodemap.persisted_data(cl) | |
|
2142 | if nm_data is not None: | |
|
2143 | docket, data = nm_data | |
|
2144 | ui.write((b"uid: %s\n") % docket.uid) | |
|
2145 | ui.write((b"tip-rev: %d\n") % docket.tip_rev) | |
|
2146 | ui.write((b"tip-node: %s\n") % hex(docket.tip_node)) | |
|
2147 | ui.write((b"data-length: %d\n") % docket.data_length) | |
|
2148 | ui.write((b"data-unused: %d\n") % docket.data_unused) | |
|
2149 | unused_perc = docket.data_unused * 100.0 / docket.data_length | |
|
2150 | ui.write((b"data-unused: %2.3f%%\n") % unused_perc) | |
|
2151 | ||
|
2152 | ||
|
2153 | @command( | |
|
2078 | 2154 | b'debugobsolete', |
|
2079 | 2155 | [ |
|
2080 | 2156 | (b'', b'flags', 0, _(b'markers flag')), |
@@ -2549,7 +2625,7 b' def debugrebuilddirstate(ui, repo, rev, ' | |||
|
2549 | 2625 | dirstatefiles = set(dirstate) |
|
2550 | 2626 | manifestonly = manifestfiles - dirstatefiles |
|
2551 | 2627 | dsonly = dirstatefiles - manifestfiles |
|
2552 | dsnotadded = set(f for f in dsonly if dirstate[f] != b'a') | |
|
2628 | dsnotadded = {f for f in dsonly if dirstate[f] != b'a'} | |
|
2553 | 2629 | changedfiles = manifestonly | dsnotadded |
|
2554 | 2630 | |
|
2555 | 2631 | dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles) |
@@ -3116,7 +3192,7 b' def debugrevspec(ui, repo, expr, **opts)' | |||
|
3116 | 3192 | raise error.Abort( |
|
3117 | 3193 | _(b'cannot use --verify-optimized with --no-optimized') |
|
3118 | 3194 | ) |
|
3119 | stagenames = set(n for n, f in stages) | |
|
3195 | stagenames = {n for n, f in stages} | |
|
3120 | 3196 | |
|
3121 | 3197 | showalways = set() |
|
3122 | 3198 | showchanged = set() |
@@ -3355,6 +3431,143 b' def debugssl(ui, repo, source=None, **op' | |||
|
3355 | 3431 | |
|
3356 | 3432 | |
|
3357 | 3433 | @command( |
|
3434 | b"debugbackupbundle", | |
|
3435 | [ | |
|
3436 | ( | |
|
3437 | b"", | |
|
3438 | b"recover", | |
|
3439 | b"", | |
|
3440 | b"brings the specified changeset back into the repository", | |
|
3441 | ) | |
|
3442 | ] | |
|
3443 | + cmdutil.logopts, | |
|
3444 | _(b"hg debugbackupbundle [--recover HASH]"), | |
|
3445 | ) | |
|
3446 | def debugbackupbundle(ui, repo, *pats, **opts): | |
|
3447 | """lists the changesets available in backup bundles | |
|
3448 | ||
|
3449 | Without any arguments, this command prints a list of the changesets in each | |
|
3450 | backup bundle. | |
|
3451 | ||
|
3452 | --recover takes a changeset hash and unbundles the first bundle that | |
|
3453 | contains that hash, which puts that changeset back in your repository. | |
|
3454 | ||
|
3455 | --verbose will print the entire commit message and the bundle path for that | |
|
3456 | backup. | |
|
3457 | """ | |
|
3458 | backups = list( | |
|
3459 | filter( | |
|
3460 | os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg") | |
|
3461 | ) | |
|
3462 | ) | |
|
3463 | backups.sort(key=lambda x: os.path.getmtime(x), reverse=True) | |
|
3464 | ||
|
3465 | opts = pycompat.byteskwargs(opts) | |
|
3466 | opts[b"bundle"] = b"" | |
|
3467 | opts[b"force"] = None | |
|
3468 | limit = logcmdutil.getlimit(opts) | |
|
3469 | ||
|
3470 | def display(other, chlist, displayer): | |
|
3471 | if opts.get(b"newest_first"): | |
|
3472 | chlist.reverse() | |
|
3473 | count = 0 | |
|
3474 | for n in chlist: | |
|
3475 | if limit is not None and count >= limit: | |
|
3476 | break | |
|
3477 | parents = [True for p in other.changelog.parents(n) if p != nullid] | |
|
3478 | if opts.get(b"no_merges") and len(parents) == 2: | |
|
3479 | continue | |
|
3480 | count += 1 | |
|
3481 | displayer.show(other[n]) | |
|
3482 | ||
|
3483 | recovernode = opts.get(b"recover") | |
|
3484 | if recovernode: | |
|
3485 | if scmutil.isrevsymbol(repo, recovernode): | |
|
3486 | ui.warn(_(b"%s already exists in the repo\n") % recovernode) | |
|
3487 | return | |
|
3488 | elif backups: | |
|
3489 | msg = _( | |
|
3490 | b"Recover changesets using: hg debugbackupbundle --recover " | |
|
3491 | b"<changeset hash>\n\nAvailable backup changesets:" | |
|
3492 | ) | |
|
3493 | ui.status(msg, label=b"status.removed") | |
|
3494 | else: | |
|
3495 | ui.status(_(b"no backup changesets found\n")) | |
|
3496 | return | |
|
3497 | ||
|
3498 | for backup in backups: | |
|
3499 | # Much of this is copied from the hg incoming logic | |
|
3500 | source = ui.expandpath(os.path.relpath(backup, encoding.getcwd())) | |
|
3501 | source, branches = hg.parseurl(source, opts.get(b"branch")) | |
|
3502 | try: | |
|
3503 | other = hg.peer(repo, opts, source) | |
|
3504 | except error.LookupError as ex: | |
|
3505 | msg = _(b"\nwarning: unable to open bundle %s") % source | |
|
3506 | hint = _(b"\n(missing parent rev %s)\n") % short(ex.name) | |
|
3507 | ui.warn(msg, hint=hint) | |
|
3508 | continue | |
|
3509 | revs, checkout = hg.addbranchrevs( | |
|
3510 | repo, other, branches, opts.get(b"rev") | |
|
3511 | ) | |
|
3512 | ||
|
3513 | if revs: | |
|
3514 | revs = [other.lookup(rev) for rev in revs] | |
|
3515 | ||
|
3516 | quiet = ui.quiet | |
|
3517 | try: | |
|
3518 | ui.quiet = True | |
|
3519 | other, chlist, cleanupfn = bundlerepo.getremotechanges( | |
|
3520 | ui, repo, other, revs, opts[b"bundle"], opts[b"force"] | |
|
3521 | ) | |
|
3522 | except error.LookupError: | |
|
3523 | continue | |
|
3524 | finally: | |
|
3525 | ui.quiet = quiet | |
|
3526 | ||
|
3527 | try: | |
|
3528 | if not chlist: | |
|
3529 | continue | |
|
3530 | if recovernode: | |
|
3531 | with repo.lock(), repo.transaction(b"unbundle") as tr: | |
|
3532 | if scmutil.isrevsymbol(other, recovernode): | |
|
3533 | ui.status(_(b"Unbundling %s\n") % (recovernode)) | |
|
3534 | f = hg.openpath(ui, source) | |
|
3535 | gen = exchange.readbundle(ui, f, source) | |
|
3536 | if isinstance(gen, bundle2.unbundle20): | |
|
3537 | bundle2.applybundle( | |
|
3538 | repo, | |
|
3539 | gen, | |
|
3540 | tr, | |
|
3541 | source=b"unbundle", | |
|
3542 | url=b"bundle:" + source, | |
|
3543 | ) | |
|
3544 | else: | |
|
3545 | gen.apply(repo, b"unbundle", b"bundle:" + source) | |
|
3546 | break | |
|
3547 | else: | |
|
3548 | backupdate = encoding.strtolocal( | |
|
3549 | time.strftime( | |
|
3550 | "%a %H:%M, %Y-%m-%d", | |
|
3551 | time.localtime(os.path.getmtime(source)), | |
|
3552 | ) | |
|
3553 | ) | |
|
3554 | ui.status(b"\n%s\n" % (backupdate.ljust(50))) | |
|
3555 | if ui.verbose: | |
|
3556 | ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source)) | |
|
3557 | else: | |
|
3558 | opts[ | |
|
3559 | b"template" | |
|
3560 | ] = b"{label('status.modified', node|short)} {desc|firstline}\n" | |
|
3561 | displayer = logcmdutil.changesetdisplayer( | |
|
3562 | ui, other, opts, False | |
|
3563 | ) | |
|
3564 | display(other, chlist, displayer) | |
|
3565 | displayer.close() | |
|
3566 | finally: | |
|
3567 | cleanupfn() | |
|
3568 | ||
|
3569 | ||
|
3570 | @command( | |
|
3358 | 3571 | b'debugsub', |
|
3359 | 3572 | [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))], |
|
3360 | 3573 | _(b'[-r REV] [REV]'), |
@@ -3423,6 +3636,17 b' def debugsuccessorssets(ui, repo, *revs,' | |||
|
3423 | 3636 | ui.write(b'\n') |
|
3424 | 3637 | |
|
3425 | 3638 | |
|
3639 | @command(b'debugtagscache', []) | |
|
3640 | def debugtagscache(ui, repo): | |
|
3641 | """display the contents of .hg/cache/hgtagsfnodes1""" | |
|
3642 | cache = tagsmod.hgtagsfnodescache(repo.unfiltered()) | |
|
3643 | for r in repo: | |
|
3644 | node = repo[r].node() | |
|
3645 | tagsnode = cache.getfnode(node, computemissing=False) | |
|
3646 | tagsnodedisplay = hex(tagsnode) if tagsnode else b'missing/invalid' | |
|
3647 | ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay)) | |
|
3648 | ||
|
3649 | ||
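The loop above prints one line per revision. A small sketch (hypothetical values, not from the series) of the byte formatting it uses:

    r = 0
    node_hex = b"ab" * 20                  # 40-hex changeset node
    tagsnodedisplay = b"missing/invalid"   # marker when the cache has no entry
    line = b"%d %s %s\n" % (r, node_hex, tagsnodedisplay)
    assert line == b"0 " + b"ab" * 20 + b" missing/invalid\n"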
|
3426 | 3650 | @command( |
|
3427 | 3651 | b'debugtemplate', |
|
3428 | 3652 | [ |
@@ -3497,7 +3721,7 b' def debugtemplate(ui, repo, tmpl, **opts' | |||
|
3497 | 3721 | def debuguigetpass(ui, prompt=b''): |
|
3498 | 3722 | """show prompt to type password""" |
|
3499 | 3723 | r = ui.getpass(prompt) |
|
3500 | ui.writenoi18n(b'respose: %s\n' % r) | |
|
3724 | ui.writenoi18n(b'response: %s\n' % r) | |
|
3501 | 3725 | |
|
3502 | 3726 | |
|
3503 | 3727 | @command( |
@@ -27,6 +27,7 b' from . import (' | |||
|
27 | 27 | policy, |
|
28 | 28 | pycompat, |
|
29 | 29 | scmutil, |
|
30 | sparse, | |
|
30 | 31 | txnutil, |
|
31 | 32 | util, |
|
32 | 33 | ) |
@@ -1083,7 +1084,7 b' class dirstate(object):' | |||
|
1083 | 1084 | results[next(iv)] = st |
|
1084 | 1085 | return results |
|
1085 | 1086 | |
|
1086 | def _rust_status(self, matcher, list_clean): | |
|
1087 | def _rust_status(self, matcher, list_clean, list_ignored, list_unknown): | |
|
1087 | 1088 | # Force Rayon (Rust parallelism library) to respect the number of |
|
1088 | 1089 | # workers. This is a temporary workaround until Rust code knows |
|
1089 | 1090 | # how to read the config file. |
@@ -1101,16 +1102,45 b' class dirstate(object):' | |||
|
1101 | 1102 | added, |
|
1102 | 1103 | removed, |
|
1103 | 1104 | deleted, |
|
1105 | clean, | |
|
1106 | ignored, | |
|
1104 | 1107 | unknown, |
|
1105 | clean, | |
|
1108 | warnings, | |
|
1109 | bad, | |
|
1106 | 1110 | ) = rustmod.status( |
|
1107 | 1111 | self._map._rustmap, |
|
1108 | 1112 | matcher, |
|
1109 | 1113 | self._rootdir, |
|
1110 | bool(list_clean), | |
|
1114 | self._ignorefiles(), | |
|
1115 | self._checkexec, | |
|
1111 | 1116 | self._lastnormaltime, |
|
1112 | self._checkexec, | |
|
1117 | bool(list_clean), | |
|
1118 | bool(list_ignored), | |
|
1119 | bool(list_unknown), | |
|
1120 | ) | |
|
1121 | if self._ui.warn: | |
|
1122 | for item in warnings: | |
|
1123 | if isinstance(item, tuple): | |
|
1124 | file_path, syntax = item | |
|
1125 | msg = _(b"%s: ignoring invalid syntax '%s'\n") % ( | |
|
1126 | file_path, | |
|
1127 | syntax, | |
|
1113 | 1128 | ) |
|
1129 | self._ui.warn(msg) | |
|
1130 | else: | |
|
1131 | msg = _(b"skipping unreadable pattern file '%s': %s\n") | |
|
1132 | self._ui.warn( | |
|
1133 | msg | |
|
1134 | % ( | |
|
1135 | pathutil.canonpath( | |
|
1136 | self._rootdir, self._rootdir, item | |
|
1137 | ), | |
|
1138 | b"No such file or directory", | |
|
1139 | ) | |
|
1140 | ) | |
|
1141 | ||
|
1142 | for (fn, message) in bad: | |
|
1143 | matcher.bad(fn, encoding.strtolocal(message)) | |
|
1114 | 1144 | |
|
1115 | 1145 | status = scmutil.status( |
|
1116 | 1146 | modified=modified, |
@@ -1118,7 +1148,7 b' class dirstate(object):' | |||
|
1118 | 1148 | removed=removed, |
|
1119 | 1149 | deleted=deleted, |
|
1120 | 1150 | unknown=unknown, |
|
1121 | ignored=[], | |
|
1151 | ignored=ignored, | |
|
1122 | 1152 | clean=clean, |
|
1123 | 1153 | ) |
|
1124 | 1154 | return (lookup, status) |
@@ -1148,26 +1178,34 b' class dirstate(object):' | |||
|
1148 | 1178 | |
|
1149 | 1179 | use_rust = True |
|
1150 | 1180 | |
|
1151 | allowed_matchers = (matchmod.alwaysmatcher, matchmod.exactmatcher) | |
|
1181 | allowed_matchers = ( | |
|
1182 | matchmod.alwaysmatcher, | |
|
1183 | matchmod.exactmatcher, | |
|
1184 | matchmod.includematcher, | |
|
1185 | ) | |
|
1152 | 1186 | |
|
1153 | 1187 | if rustmod is None: |
|
1154 | 1188 | use_rust = False |
|
1189 | elif self._checkcase: | |
|
1190 | # Case-insensitive filesystems are not handled yet | |
|
1191 | use_rust = False | |
|
1155 | 1192 | elif subrepos: |
|
1156 | 1193 | use_rust = False |
|
1157 | elif bool(listunknown): | |
|
1158 | # Pathauditor does not exist yet in Rust, unknown files | |
|
1159 | # can't be trusted. | |
|
1194 | elif sparse.enabled: | |
|
1160 | 1195 | use_rust = False |
|
1161 | elif self._ignorefiles() and listignored: | |
|
1162 | # Rust has no ignore mechanism yet, so don't use Rust for | |
|
1163 | # commands that need ignore. | |
|
1196 | elif match.traversedir is not None: | |
|
1164 | 1197 | use_rust = False |
|
1165 | 1198 | elif not isinstance(match, allowed_matchers): |
|
1166 | 1199 | # Matchers have yet to be implemented |
|
1167 | 1200 | use_rust = False |
|
1168 | 1201 | |
|
1169 | 1202 | if use_rust: |
|
1170 | return self._rust_status(match, listclean) | |
|
1203 | try: | |
|
1204 | return self._rust_status( | |
|
1205 | match, listclean, listignored, listunknown | |
|
1206 | ) | |
|
1207 | except rustmod.FallbackError: | |
|
1208 | pass | |
|
1171 | 1209 | |
|
1172 | 1210 | def noop(f): |
|
1173 | 1211 | pass |
@@ -1249,19 +1287,19 b' class dirstate(object):' | |||
|
1249 | 1287 | aadd(fn) |
|
1250 | 1288 | elif state == b'r': |
|
1251 | 1289 | radd(fn) |
|
1252 | ||
|
1253 | return ( | |
|
1254 | lookup, | |
|
1255 | scmutil.status( | |
|
1290 | status = scmutil.status( | |
|
1256 | 1291 | modified, added, removed, deleted, unknown, ignored, clean |
1257 | ), | |
|
1258 | 1292 | ) |
|
1293 | return (lookup, status) | |
|
1259 | 1294 | |
|
1260 | 1295 | def matches(self, match): |
|
1261 | 1296 | ''' |
|
1262 | 1297 | return files in the dirstate (in whatever state) filtered by match |
|
1263 | 1298 | ''' |
|
1264 | 1299 | dmap = self._map |
|
1300 | if rustmod is not None: | |
|
1301 | dmap = self._map._rustmap | |
|
1302 | ||
|
1265 | 1303 | if match.always(): |
|
1266 | 1304 | return dmap.keys() |
|
1267 | 1305 | files = match.files() |
@@ -192,7 +192,7 b' def findcommonoutgoing(' | |||
|
192 | 192 | # ancestors of missing |
|
193 | 193 | og._computecommonmissing() |
|
194 | 194 | cl = repo.changelog |
|
195 | missingrevs = set(cl.rev(n) for n in og._missing) | |
|
195 | missingrevs = {cl.rev(n) for n in og._missing} | |
|
196 | 196 | og._common = set(cl.ancestors(missingrevs)) - missingrevs |
|
197 | 197 | commonheads = set(og.commonheads) |
|
198 | 198 | og.missingheads = [h for h in og.missingheads if h not in commonheads] |
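This hunk is the first of many in this series applying the same mechanical rewrite: `set(...)` and `dict(...)` calls over generator expressions become comprehension literals. A minimal illustration (not from the series) of why the two forms are equivalent:

    nums = [1, 2, 2, 3]
    old_style = set(n * 2 for n in nums)   # generator expression passed to set()
    new_style = {n * 2 for n in nums}      # set comprehension literal
    assert old_style == new_style == {2, 4, 6}

The literal form avoids the intermediate generator call and reads more directly, which is the motivation for the sweep.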
@@ -268,8 +268,8 b' def _headssummary(pushop):' | |||
|
268 | 268 | # If there is no obsstore, no post processing is needed. |
|
269 | 269 | if repo.obsstore: |
|
270 | 270 | torev = repo.changelog.rev |
|
271 | futureheads = set(torev(h) for h in outgoing.missingheads) | |
|
272 | futureheads |= set(torev(h) for h in outgoing.commonheads) | |
|
271 | futureheads = {torev(h) for h in outgoing.missingheads} | |
|
272 | futureheads |= {torev(h) for h in outgoing.commonheads} | |
|
273 | 273 | allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True) |
|
274 | 274 | for branch, heads in sorted(pycompat.iteritems(headssum)): |
|
275 | 275 | remoteheads, newheads, unsyncedheads, placeholder = heads |
@@ -452,7 +452,7 b' def checkheads(pushop):' | |||
|
452 | 452 | if branch not in (b'default', None): |
|
453 | 453 | errormsg = _( |
|
454 | 454 | b"push creates new remote head %s on branch '%s'!" |
|
455 | ) % (short(dhs[0]), branch) | |
|
455 | ) % (short(dhs[0]), branch,) | |
|
456 | 456 | elif repo[dhs[0]].bookmarks(): |
|
457 | 457 | errormsg = _( |
|
458 | 458 | b"push creates new remote head %s " |
@@ -10,6 +10,7 b' from __future__ import absolute_import, ' | |||
|
10 | 10 | import difflib |
|
11 | 11 | import errno |
|
12 | 12 | import getopt |
|
13 | import io | |
|
13 | 14 | import os |
|
14 | 15 | import pdb |
|
15 | 16 | import re |
@@ -144,7 +145,50 b' def run():' | |||
|
144 | 145 | if pycompat.ispy3: |
|
145 | 146 | |
|
146 | 147 | def initstdio(): |
|
147 | pass | |
|
148 | # stdio streams on Python 3 are io.TextIOWrapper instances proxying another | |
|
149 | # buffer. These streams will normalize \n to \r\n by default. Mercurial's | |
|
150 | # preferred mechanism for writing output (ui.write()) uses io.BufferedWriter | |
|
151 | # instances, which write to the underlying stdio file descriptor in binary | |
|
152 | # mode. ui.write() uses \n for line endings and no line ending normalization | |
|
153 | # is attempted through this interface. This "just works," even if the system | |
|
154 | # preferred line ending is not \n. | |
|
155 | # | |
|
156 | # But some parts of Mercurial (e.g. hooks) can still send data to sys.stdout | |
|
157 | # and sys.stderr. They will inherit the line ending normalization settings, | |
|
158 | # potentially causing e.g. \r\n to be emitted. Since emitting \n should | |
|
159 | # "just work," here we change the sys.* streams to disable line ending | |
|
160 | # normalization, ensuring compatibility with our ui type. | |
|
161 | ||
|
162 | # write_through is new in Python 3.7. | |
|
163 | kwargs = { | |
|
164 | "newline": "\n", | |
|
165 | "line_buffering": sys.stdout.line_buffering, | |
|
166 | } | |
|
167 | if util.safehasattr(sys.stdout, "write_through"): | |
|
168 | kwargs["write_through"] = sys.stdout.write_through | |
|
169 | sys.stdout = io.TextIOWrapper( | |
|
170 | sys.stdout.buffer, sys.stdout.encoding, sys.stdout.errors, **kwargs | |
|
171 | ) | |
|
172 | ||
|
173 | kwargs = { | |
|
174 | "newline": "\n", | |
|
175 | "line_buffering": sys.stderr.line_buffering, | |
|
176 | } | |
|
177 | if util.safehasattr(sys.stderr, "write_through"): | |
|
178 | kwargs["write_through"] = sys.stderr.write_through | |
|
179 | sys.stderr = io.TextIOWrapper( | |
|
180 | sys.stderr.buffer, sys.stderr.encoding, sys.stderr.errors, **kwargs | |
|
181 | ) | |
|
182 | ||
|
183 | # No write_through on read-only stream. | |
|
184 | sys.stdin = io.TextIOWrapper( | |
|
185 | sys.stdin.buffer, | |
|
186 | sys.stdin.encoding, | |
|
187 | sys.stdin.errors, | |
|
188 | # None is universal newlines mode. | |
|
189 | newline=None, | |
|
190 | line_buffering=sys.stdin.line_buffering, | |
|
191 | ) | |
|
148 | 192 | |
|
149 | 193 | def _silencestdio(): |
|
150 | 194 | for fp in (sys.stdout, sys.stderr): |
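The initstdio() rewrite above hinges on TextIOWrapper's newline parameter. A standalone sketch (not Mercurial code) of the translation behavior it disables:

    import io

    raw = io.BytesIO()
    w = io.TextIOWrapper(raw, encoding="utf-8", newline="\r\n")
    w.write("line\n")
    w.flush()
    assert raw.getvalue() == b"line\r\n"   # '\n' translated on write

    raw2 = io.BytesIO()
    w2 = io.TextIOWrapper(raw2, encoding="utf-8", newline="\n")
    w2.write("line\n")
    w2.flush()
    assert raw2.getvalue() == b"line\n"    # passed through untouched

Passing newline="\n", as the new code does for stdout and stderr, keeps hooks that print through sys.stdout from emitting \r\n on systems whose preferred line ending is not \n.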
@@ -514,7 +558,7 b' def aliasinterpolate(name, args, cmd):' | |||
|
514 | 558 | ''' |
|
515 | 559 | # util.interpolate can't deal with "$@" (with quotes) because it's only |
|
516 | 560 | # built to match prefix + patterns. |
|
517 | replacemap = dict((b'$%d' % (i + 1), arg) for i, arg in enumerate(args)) | |
|
561 | replacemap = {b'$%d' % (i + 1): arg for i, arg in enumerate(args)} | |
|
518 | 562 | replacemap[b'$0'] = name |
|
519 | 563 | replacemap[b'$$'] = b'$' |
|
520 | 564 | replacemap[b'$@'] = b' '.join(args) |
@@ -624,7 +668,7 b' class cmdalias(object):' | |||
|
624 | 668 | except error.AmbiguousCommand: |
|
625 | 669 | self.badalias = _( |
|
626 | 670 | b"alias '%s' resolves to ambiguous command '%s'" |
|
627 | ) % (self.name, cmd) | |
|
671 | ) % (self.name, cmd,) | |
|
628 | 672 | |
|
629 | 673 | def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None): |
|
630 | 674 | # confine strings to be passed to i18n.gettext() |
@@ -86,10 +86,10 b' elif _nativeenviron:' | |||
|
86 | 86 | else: |
|
87 | 87 | # preferred encoding isn't known yet; use utf-8 to avoid unicode error |
|
88 | 88 | # and recreate it once encoding is settled |
|
89 | environ = dict( | |
|
90 | (k.encode('utf-8'), v.encode('utf-8')) | |
|
89 | environ = { | |
|
90 | k.encode('utf-8'): v.encode('utf-8') | |
|
91 | 91 | for k, v in os.environ.items() # re-exports |
|
92 | ) | |
|
92 | } | |
|
93 | 93 | |
|
94 | 94 | _encodingrewrites = { |
|
95 | 95 | b'646': b'ascii', |
@@ -285,10 +285,10 b' else:' | |||
|
285 | 285 | if not _nativeenviron: |
|
286 | 286 | # now encoding and helper functions are available, recreate the environ |
|
287 | 287 | # dict to be exported to other modules |
|
288 | environ = dict( | |
|
289 | (tolocal(k.encode('utf-8')), tolocal(v.encode('utf-8'))) | |
|
288 | environ = { | |
|
289 | tolocal(k.encode('utf-8')): tolocal(v.encode('utf-8')) | |
|
290 | 290 | for k, v in os.environ.items() # re-exports |
|
291 | ) | |
|
291 | } | |
|
292 | 292 | |
|
293 | 293 | if pycompat.ispy3: |
|
294 | 294 | # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which |
@@ -8,6 +8,7 b'' | |||
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | import weakref | |
|
11 | 12 | |
|
12 | 13 | from .i18n import _ |
|
13 | 14 | from .node import ( |
@@ -856,6 +857,10 b' def _processcompared(pushop, pushed, exp' | |||
|
856 | 857 | for b, scid, dcid in addsrc: |
|
857 | 858 | if b in explicit: |
|
858 | 859 | explicit.remove(b) |
|
860 | if bookmod.isdivergent(b): | |
|
861 | pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b) | |
|
862 | pushop.bkresult = 2 | |
|
863 | else: | |
|
859 | 864 | pushop.outbookmarks.append((b, b'', scid)) |
|
860 | 865 | # search for overwritten bookmark |
|
861 | 866 | for b, scid, dcid in list(advdst) + list(diverge) + list(differ): |
@@ -1675,12 +1680,12 b' def _fullpullbundle2(repo, pullop):' | |||
|
1675 | 1680 | def headsofdiff(h1, h2): |
|
1676 | 1681 | """Returns heads(h1 % h2)""" |
|
1677 | 1682 | res = unfi.set(b'heads(%ln %% %ln)', h1, h2) |
|
1678 | return set(ctx.node() for ctx in res) | |
|
1683 | return {ctx.node() for ctx in res} | |
|
1679 | 1684 | |
|
1680 | 1685 | def headsofunion(h1, h2): |
|
1681 | 1686 | """Returns heads((h1 + h2) - null)""" |
|
1682 | 1687 | res = unfi.set(b'heads((%ln + %ln - null))', h1, h2) |
|
1683 | return set(ctx.node() for ctx in res) | |
|
1688 | return {ctx.node() for ctx in res} | |
|
1684 | 1689 | |
|
1685 | 1690 | while True: |
|
1686 | 1691 | old_heads = unficl.heads() |
@@ -1701,6 +1706,25 b' def _fullpullbundle2(repo, pullop):' | |||
|
1701 | 1706 | pullop.rheads = set(pullop.rheads) - pullop.common |
|
1702 | 1707 | |
|
1703 | 1708 | |
|
1709 | def add_confirm_callback(repo, pullop): | |
|
1710 | """adds a finalize callback to the transaction, which can be used to show | |
|
1711 | stats to the user and confirm the pull before committing the transaction""" | |
|
1712 | ||
|
1713 | tr = pullop.trmanager.transaction() | |
|
1714 | scmutil.registersummarycallback( | |
|
1715 | repo, tr, txnname=b'pull', as_validator=True | |
|
1716 | ) | |
|
1717 | reporef = weakref.ref(repo.unfiltered()) | |
|
1718 | ||
|
1719 | def prompt(tr): | |
|
1720 | repo = reporef() | |
|
1721 | cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No') | |
|
1722 | if repo.ui.promptchoice(cm): | |
|
1723 | raise error.Abort(b"user aborted") | |
|
1724 | ||
|
1725 | tr.addvalidator(b'900-pull-prompt', prompt) | |
|
1726 | ||
|
1727 | ||
|
1704 | 1728 | def pull( |
|
1705 | 1729 | repo, |
|
1706 | 1730 | remote, |
@@ -1712,6 +1736,7 b' def pull(' | |||
|
1712 | 1736 | includepats=None, |
|
1713 | 1737 | excludepats=None, |
|
1714 | 1738 | depth=None, |
|
1739 | confirm=None, | |
|
1715 | 1740 | ): |
|
1716 | 1741 | """Fetch repository data from a remote. |
|
1717 | 1742 | |
@@ -1736,6 +1761,8 b' def pull(' | |||
|
1736 | 1761 | ``depth`` is an integer indicating the DAG depth of history we're |
|
1737 | 1762 | interested in. If defined, for each revision specified in ``heads``, we |
|
1738 | 1763 | will fetch up to this many of its ancestors and data associated with them. |
|
1764 | ``confirm`` is a boolean indicating whether the pull should be confirmed | |
|
1765 | before committing the transaction. This overrides HGPLAIN. | |
|
1739 | 1766 | |
|
1740 | 1767 | Returns the ``pulloperation`` created for this pull. |
|
1741 | 1768 | """ |
@@ -1782,6 +1809,11 b' def pull(' | |||
|
1782 | 1809 | if not bookmod.bookmarksinstore(repo): |
|
1783 | 1810 | wlock = repo.wlock() |
|
1784 | 1811 | with wlock, repo.lock(), pullop.trmanager: |
|
1812 | if confirm or ( | |
|
1813 | repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain() | |
|
1814 | ): | |
|
1815 | add_confirm_callback(repo, pullop) | |
|
1816 | ||
|
1785 | 1817 | # Use the modern wire protocol, if available. |
|
1786 | 1818 | if remote.capable(b'command-changesetdata'): |
|
1787 | 1819 | exchangev2.pull(pullop) |
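The confirm feature threads through three pieces: the `confirm` argument (or the `pull.confirm` config consulted above), add_confirm_callback(), and the transaction's validator mechanism. A toy model (not Mercurial's transaction class) of how a registered validator can veto the commit:

    class ToyTransaction:
        def __init__(self):
            self._validators = {}

        def addvalidator(self, category, callback):
            # categories sort lexically, so b'900-pull-prompt' runs late
            self._validators[category] = callback

        def close(self):
            for category in sorted(self._validators):
                self._validators[category](self)  # raising here aborts
            # ... commit the transaction ...

    tr = ToyTransaction()
    tr.addvalidator(b'900-pull-prompt', lambda tr: None)  # user accepted
    tr.close()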
@@ -3068,7 +3100,15 b' def sortclonebundleentries(ui, entries):' | |||
|
3068 | 3100 | if not prefers: |
|
3069 | 3101 | return list(entries) |
|
3070 | 3102 | |
|
3071 | prefers = [p.split(b'=', 1) for p in prefers] | |
|
3103 | def _split(p): | |
|
3104 | if b'=' not in p: | |
|
3105 | hint = _(b"each comma-separated item should be a key=value pair") | |
|
3106 | raise error.Abort( | |
|
3107 | _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint | |
|
3108 | ) | |
|
3109 | return p.split(b'=', 1) | |
|
3110 | ||
|
3111 | prefers = [_split(p) for p in prefers] | |
|
3072 | 3112 | |
|
3073 | 3113 | items = sorted(clonebundleentry(v, prefers) for v in entries) |
|
3074 | 3114 | return [i.value for i in items] |
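A hypothetical value shows what the new validation accepts; `ui.clonebundleprefers` is a comma-separated list of key=value pairs:

    prefers = b"COMPRESSION=zstd,VERSION=v2".split(b",")
    pairs = [p.split(b"=", 1) for p in prefers]
    assert pairs == [[b"COMPRESSION", b"zstd"], [b"VERSION", b"v2"]]

An item with no '=' (say, a bare b"zstd") previously surfaced later as a confusing unpacking failure; it now aborts early with the hint above.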
@@ -787,11 +787,11 b' def disabled():' | |||
|
787 | 787 | try: |
|
788 | 788 | from hgext import __index__ # pytype: disable=import-error |
|
789 | 789 | |
|
790 | return dict( | |
|
791 | (name, gettext(desc)) | |
|
790 | return { | |
|
791 | name: gettext(desc) | |
|
792 | 792 | for name, desc in pycompat.iteritems(__index__.docs) |
|
793 | 793 | if name not in _order |
|
794 | ) | |
|
794 | } | |
|
795 | 795 | except (ImportError, AttributeError): |
|
796 | 796 | pass |
|
797 | 797 | |
@@ -808,18 +808,8 b' def disabled():' | |||
|
808 | 808 | return exts |
|
809 | 809 | |
|
810 | 810 | |
|
811 | def disabledext(name): | |
812 | '''find a specific disabled extension from hgext. returns desc''' | |
|
813 | try: | |
|
814 | from hgext import __index__ # pytype: disable=import-error | |
|
815 | ||
|
816 | if name in _order: # enabled | |
|
817 | return | |
|
818 | else: | |
|
819 | return gettext(__index__.docs.get(name)) | |
|
820 | except (ImportError, AttributeError): | |
|
821 | pass | |
|
822 | ||
|
811 | def disabled_help(name): | |
|
812 | """Obtain the full help text for a disabled extension, or None.""" | |
|
823 | 813 | paths = _disabledpaths() |
|
824 | 814 | if name in paths: |
|
825 | 815 | return _disabledhelp(paths[name]) |
@@ -314,7 +314,7 b' def fancyopts(args, options, state, gnu=' | |||
|
314 | 314 | argmap = {} |
|
315 | 315 | defmap = {} |
|
316 | 316 | negations = {} |
|
317 | alllong = set(o[1] for o in options) | |
|
317 | alllong = {o[1] for o in options} | |
|
318 | 318 | |
|
319 | 319 | for option in options: |
|
320 | 320 | if len(option) == 5: |
@@ -58,7 +58,7 b' def dagwalker(repo, revs):' | |||
|
58 | 58 | # partition into parents in the rev set and missing parents, then |
|
59 | 59 | # augment the lists with markers, to inform graph drawing code about |
|
60 | 60 | # what kind of edge to draw between nodes. |
|
61 | pset = set(p.rev() for p in ctx.parents() if p.rev() in revs) | |
|
61 | pset = {p.rev() for p in ctx.parents() if p.rev() in revs} | |
|
62 | 62 | mpars = [ |
|
63 | 63 | p.rev() |
|
64 | 64 | for p in ctx.parents() |
@@ -95,9 +95,9 b' def nodes(repo, nodes):' | |||
|
95 | 95 | include = set(nodes) |
|
96 | 96 | for node in nodes: |
|
97 | 97 | ctx = repo[node] |
|
98 | parents = set( | |
|
98 | parents = { | |
|
99 | 99 | (PARENT, p.rev()) for p in ctx.parents() if p.node() in include |
|
100 | ) | |
|
100 | } | |
|
101 | 101 | yield (ctx.rev(), CHANGESET, ctx, sorted(parents)) |
|
102 | 102 | |
|
103 | 103 |
@@ -137,7 +137,7 b' def extendrange(repo, state, nodes, good' | |||
|
137 | 137 | side = state[b'bad'] |
|
138 | 138 | else: |
|
139 | 139 | side = state[b'good'] |
|
140 | num = len(set(i.node() for i in parents) & set(side)) | |
|
140 | num = len({i.node() for i in parents} & set(side)) | |
|
141 | 141 | if num == 1: |
|
142 | 142 | return parents[0].ancestor(parents[1]) |
|
143 | 143 | return None |
@@ -153,7 +153,18 b' def extshelp(ui):' | |||
|
153 | 153 | return doc |
|
154 | 154 | |
|
155 | 155 | |
|
156 | def optrst(header, options, verbose): | |
|
156 | def parsedefaultmarker(text): | |
|
157 | """given a text 'abc (DEFAULT: def.ghi)', | |
|
158 | returns (b'abc', [b'def', b'ghi']). Otherwise returns None""" | |
|
159 | if text[-1:] == b')': | |
|
160 | marker = b' (DEFAULT: ' | |
|
161 | pos = text.find(marker) | |
|
162 | if pos >= 0: | |
|
163 | item = text[pos + len(marker) : -1] | |
|
164 | return text[:pos], item.split(b'.', 2) | |
|
165 | ||
|
166 | ||
|
167 | def optrst(header, options, verbose, ui): | |
|
157 | 168 | data = [] |
|
158 | 169 | multioccur = False |
|
159 | 170 | for option in options: |
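A usage sketch of the marker convention parsed above (hypothetical option description); the second element is the [section, name] pair that optrst() feeds to ui.configbool():

    text = b'use compacted output (DEFAULT: ui.compact)'
    marker = b' (DEFAULT: '
    pos = text.find(marker)
    item = text[pos + len(marker):-1]        # slice off trailing b')'
    assert text[:pos] == b'use compacted output'
    assert item.split(b'.', 2) == [b'ui', b'compact']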
@@ -165,7 +176,14 b' def optrst(header, options, verbose):' | |||
|
165 | 176 | |
|
166 | 177 | if not verbose and any(w in desc for w in _exclkeywords): |
|
167 | 178 | continue |
|
168 | ||
|
179 | defaultstrsuffix = b'' | |
|
180 | if default is None: | |
|
181 | parseresult = parsedefaultmarker(desc) | |
|
182 | if parseresult is not None: | |
|
183 | (desc, (section, name)) = parseresult | |
|
184 | if ui.configbool(section, name): | |
|
185 | default = True | |
|
186 | defaultstrsuffix = _(b' from config') | |
|
169 | 187 | so = b'' |
|
170 | 188 | if shortopt: |
|
171 | 189 | so = b'-' + shortopt |
@@ -183,7 +201,7 b' def optrst(header, options, verbose):' | |||
|
183 | 201 | defaultstr = pycompat.bytestr(default) |
|
184 | 202 | if default is True: |
|
185 | 203 | defaultstr = _(b"on") |
|
186 | desc += _(b" (default: %s)") % defaultstr | |
|
204 | desc += _(b" (default: %s)") % (defaultstr + defaultstrsuffix) | |
|
187 | 205 | |
|
188 | 206 | if isinstance(default, list): |
|
189 | 207 | lo += b" %s [+]" % optlabel |
@@ -714,11 +732,13 b' def help_(' | |||
|
714 | 732 | |
|
715 | 733 | # options |
|
716 | 734 | if not ui.quiet and entry[1]: |
|
717 | rst.append(optrst(_(b"options"), entry[1], ui.verbose)) | |
|
735 | rst.append(optrst(_(b"options"), entry[1], ui.verbose, ui)) | |
|
718 | 736 | |
|
719 | 737 | if ui.verbose: |
|
720 | 738 | rst.append( |
|
721 | optrst(_(b"global options"), commands.globalopts, ui.verbose) | |
|
739 | optrst( | |
|
740 | _(b"global options"), commands.globalopts, ui.verbose, ui | |
|
741 | ) | |
|
722 | 742 | ) |
|
723 | 743 | |
|
724 | 744 | if not ui.verbose: |
@@ -858,7 +878,9 b' def help_(' | |||
|
858 | 878 | elif ui.verbose: |
|
859 | 879 | rst.append( |
|
860 | 880 | b'\n%s\n' |
|
861 | % optrst(_(b"global options"), commands.globalopts, ui.verbose) | |
|
881 | % optrst( | |
|
882 | _(b"global options"), commands.globalopts, ui.verbose, ui | |
|
883 | ) | |
|
862 | 884 | ) |
|
863 | 885 | if name == b'shortlist': |
|
864 | 886 | rst.append( |
@@ -944,7 +966,7 b' def help_(' | |||
|
944 | 966 | doc = gettext(pycompat.getdoc(mod)) or _(b'no help text available') |
|
945 | 967 | except KeyError: |
|
946 | 968 | mod = None |
|
947 |
doc = extensions.disabled |
|
|
969 | doc = extensions.disabled_help(name) | |
|
948 | 970 | if not doc: |
|
949 | 971 | raise error.UnknownCommand(name) |
|
950 | 972 |
@@ -888,7 +888,8 b' https://www.mercurial-scm.org/wiki/Missi' | |||
|
888 | 888 | Compression algorithm used by revlog. Supported values are `zlib` and |
|
889 | 889 | `zstd`. The `zlib` engine is the historical default of Mercurial. `zstd` is |
|
890 | 890 | a newer format that is usually a net win over `zlib`, operating faster at |
|
891 | better compression rates. Use `zstd` to reduce CPU usage. | |
|
891 | better compression rates. Use `zstd` to reduce CPU usage. Multiple values | |
|
892 | can be specified; the first available one will be used. | |
|
892 | 893 | |
|
893 | 894 | On some systems, the Mercurial installation may lack `zstd` support. |
|
894 | 895 | |
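With the list semantics described above, a repository can name several engines and let the first available one win; a hypothetical hgrc:

    [format]
    revlog-compression = zstd, zlib

On an installation without zstd support, the zlib fallback is used instead of aborting.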
@@ -2005,12 +2006,12 b' Controls generic server settings.' | |||
|
2005 | 2006 | Level of allowed race condition between two pushing clients. |
|
2006 | 2007 | |
|
2007 | 2008 | - 'strict': push is aborted if another client touched the repository |
|
2008 | while the push was preparing. (default) | |
|
2009 | while the push was preparing. | |
|
2009 | 2010 | - 'check-related': push is only aborted if it affects a head that was also |
|
2010 | affected while the push was preparing. | |
|
2011 | ||
|
2012 | This requires compatible client (version 4.3 and later). Old client will | |
|
2013 | use 'strict'. | |
|
2011 | affected while the push was preparing. (default since 5.4) | |
|
2012 | ||
|
2013 | 'check-related' only takes effect for compatible clients (version | |
|
2014 | 4.3 and later). Older clients will use 'strict'. | |
|
2014 | 2015 | |
|
2015 | 2016 | ``validate`` |
|
2016 | 2017 | Whether to validate the completeness of pushed changesets by |
@@ -60,12 +60,19 b' def _local(path):' | |||
|
60 | 60 | path = util.expandpath(util.urllocalpath(path)) |
|
61 | 61 | |
|
62 | 62 | try: |
|
63 | isfile = os.path.isfile(path) | |
|
63 | # we use os.stat() directly here instead of os.path.isfile() | |
|
64 | # because the latter started returning `False` on invalid path | |
|
65 | # exceptions starting in 3.8 and we care about handling | |
|
66 | # invalid paths specially here. | |
|
67 | st = os.stat(path) | |
|
68 | isfile = stat.S_ISREG(st.st_mode) | |
|
64 | 69 | # Python 2 raises TypeError, Python 3 ValueError. |
|
65 | 70 | except (TypeError, ValueError) as e: |
|
66 | 71 | raise error.Abort( |
|
67 | 72 | _(b'invalid path %s: %s') % (path, pycompat.bytestr(e)) |
|
68 | 73 | ) |
|
74 | except OSError: | |
|
75 | isfile = False | |
|
69 | 76 | |
|
70 | 77 | return isfile and bundlerepo or localrepo |
|
71 | 78 | |
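A standalone demonstration (not Mercurial code) of the Python 3.8 behavior change the comment above describes: os.path.isfile() started swallowing the ValueError raised for paths with embedded NUL bytes, while os.stat() still raises it:

    import os

    bad = "repo\x00name"                   # hypothetical invalid path
    assert os.path.isfile(bad) is False    # 3.8+: error swallowed
    try:
        os.stat(bad)
    except ValueError:
        pass                               # invalid path stays distinguishable

Using os.stat() directly lets _local() keep reporting 'invalid path' instead of silently treating such paths like ordinary non-files.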
@@ -688,7 +695,7 b' def clone(' | |||
|
688 | 695 | # data. |
|
689 | 696 | createopts[b'lfs'] = True |
|
690 | 697 | |
|
691 | if extensions.disabledext(b'lfs'): | |
|
698 | if extensions.disabled_help(b'lfs'): | |
|
692 | 699 | ui.status( |
|
693 | 700 | _( |
|
694 | 701 | b'(remote is using large file support (lfs), but it is ' |
@@ -1040,10 +1047,9 b' def update(repo, node, quietempty=False,' | |||
|
1040 | 1047 | def clean(repo, node, show_stats=True, quietempty=False): |
|
1041 | 1048 | """forcibly switch the working directory to node, clobbering changes""" |
|
1042 | 1049 | stats = updaterepo(repo, node, True) |
|
1043 | repo.vfs.unlinkpath(b'graftstate', ignoremissing=True) | |
|
1050 | assert stats.unresolvedcount == 0 | |
|
1044 | 1051 | if show_stats: |
|
1045 | 1052 | _showstats(repo, stats, quietempty) |
|
1046 | return stats.unresolvedcount > 0 | |
|
1047 | 1053 | |
|
1048 | 1054 | |
|
1049 | 1055 | # naming conflict in updatetotally() |
@@ -1138,27 +1144,12 b' def updatetotally(ui, repo, checkout, br' | |||
|
1138 | 1144 | |
|
1139 | 1145 | |
|
1140 | 1146 | def merge( |
|
1141 | repo, | |
|
1142 | node, | |
|
1143 | force=None, | |
|
1144 | remind=True, | |
|
1145 | mergeforce=False, | |
|
1146 | labels=None, | |
|
1147 | abort=False, | |
|
1147 | ctx, force=False, remind=True, labels=None, | |
|
1148 | 1148 | ): |
|
1149 | 1149 | """Branch merge with node, resolving changes. Return true if any |
|
1150 | 1150 | unresolved conflicts.""" |
|
1151 | if abort: | |
|
1152 | return abortmerge(repo.ui, repo) | |
|
1153 | ||
|
1154 | stats = mergemod.update( | |
|
1155 | repo, | |
|
1156 | node, | |
|
1157 | branchmerge=True, | |
|
1158 | force=force, | |
|
1159 | mergeforce=mergeforce, | |
|
1160 | labels=labels, | |
|
1161 | ) | |
|
1151 | repo = ctx.repo() | |
|
1152 | stats = mergemod.merge(ctx, force=force, labels=labels) | |
|
1162 | 1153 | _showstats(repo, stats) |
|
1163 | 1154 | if stats.unresolvedcount: |
|
1164 | 1155 | repo.ui.status( |
@@ -1182,9 +1173,9 b' def abortmerge(ui, repo):' | |||
|
1182 | 1173 | node = repo[b'.'].hex() |
|
1183 | 1174 | |
|
1184 | 1175 | repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12]) |
|
1185 | stats = mergemod.update(repo, node, branchmerge=False, force=True) | |
|
1176 | stats = mergemod.clean_update(repo[node]) | |
|
1177 | assert stats.unresolvedcount == 0 | |
|
1186 | 1178 | _showstats(repo, stats) |
|
1187 | return stats.unresolvedcount > 0 | |
|
1188 | 1179 | |
|
1189 | 1180 | |
|
1190 | 1181 | def _incoming( |
@@ -936,5 +936,5 b' def getwebsubs(repo):' | |||
|
936 | 936 | |
|
937 | 937 | def getgraphnode(repo, ctx): |
|
938 | 938 | return templatekw.getgraphnodecurrent( |
|
939 | repo, ctx | |
|
939 | repo, ctx, {} | |
|
940 | 940 | ) + templatekw.getgraphnodesymbol(ctx) |
@@ -7,6 +7,7 b'' | |||
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | import contextlib | |
|
10 | 11 | import os |
|
11 | 12 | import sys |
|
12 | 13 | |
@@ -259,14 +260,13 b' def hook(ui, repo, htype, throw=False, *' | |||
|
259 | 260 | return r |
|
260 | 261 | |
|
261 | 262 | |
|
262 | def runhooks(ui, repo, htype, hooks, throw=False, **args): | |
|
263 | args = pycompat.byteskwargs(args) | |
|
264 | res = {} | |
|
263 | @contextlib.contextmanager | |
|
264 | def redirect_stdio(): | |
|
265 | """Redirects stdout to stderr, if possible.""" | |
|
266 | ||
|
265 | 267 | oldstdout = -1 |
|
266 | ||
|
267 | 268 | try: |
|
268 | for hname, cmd in hooks: | |
|
269 | if oldstdout == -1 and _redirect: | |
|
269 | if _redirect: | |
|
270 | 270 |
|
|
271 | 271 |
|
|
272 | 272 |
|
@@ -279,6 +279,26 b' def runhooks(ui, repo, htype, hooks, thr' | |||
|
279 | 279 |
|
|
280 | 280 |
|
|
281 | 281 | |
|
282 | yield | |
|
283 | ||
|
284 | finally: | |
|
285 | # The stderr is fully buffered on Windows when connected to a pipe. | |
|
286 | # A forcible flush is required to make small stderr data in the | |
|
287 | # remote side available to the client immediately. | |
|
288 | procutil.stderr.flush() | |
|
289 | ||
|
290 | if _redirect and oldstdout >= 0: | |
|
291 | procutil.stdout.flush() # write hook output to stderr fd | |
|
292 | os.dup2(oldstdout, stdoutno) | |
|
293 | os.close(oldstdout) | |
|
294 | ||
|
295 | ||
|
296 | def runhooks(ui, repo, htype, hooks, throw=False, **args): | |
|
297 | args = pycompat.byteskwargs(args) | |
|
298 | res = {} | |
|
299 | ||
|
300 | with redirect_stdio(): | |
|
301 | for hname, cmd in hooks: | |
|
282 | 302 | if cmd is _fromuntrusted: |
|
283 | 303 | if throw: |
|
284 | 304 | raise error.HookAbort( |
@@ -312,15 +332,5 b' def runhooks(ui, repo, htype, hooks, thr' | |||
|
312 | 332 | raised = False |
|
313 | 333 | |
|
314 | 334 | res[hname] = r, raised |
|
315 | finally: | |
|
316 | # The stderr is fully buffered on Windows when connected to a pipe. | |
|
317 | # A forcible flush is required to make small stderr data in the | |
|
318 | # remote side available to the client immediately. | |
|
319 | procutil.stderr.flush() | |
|
320 | ||
|
321 | if _redirect and oldstdout >= 0: | |
|
322 | procutil.stdout.flush() # write hook output to stderr fd | |
|
323 | os.dup2(oldstdout, stdoutno) | |
|
324 | os.close(oldstdout) | |
|
325 | 335 | |
|
326 | 336 | return res |
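A minimal sketch (independent of the hook module) of the file-descriptor dance redirect_stdio() performs: duplicate fd 1, point it at stderr for the duration, then restore it:

    import os
    import sys

    def with_stdout_on_stderr():
        stdoutno = sys.stdout.fileno()
        stderrno = sys.stderr.fileno()
        sys.stdout.flush()
        saved = os.dup(stdoutno)         # keep the real stdout alive
        try:
            os.dup2(stderrno, stdoutno)  # fd 1 now writes to stderr
            os.write(stdoutno, b"hook output lands on stderr\n")
        finally:
            os.dup2(saved, stdoutno)     # restore the original stdout
            os.close(saved)

Moving this into a context manager, as the diff does, guarantees the restore and the stderr flush run even when a hook raises.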
@@ -39,12 +39,15 b' class httpsendfile(object):' | |||
|
39 | 39 | self.write = self._data.write |
|
40 | 40 | self.length = os.fstat(self._data.fileno()).st_size |
|
41 | 41 | self._pos = 0 |
|
42 | self._progress = self._makeprogress() | |
|
43 | ||
|
44 | def _makeprogress(self): | |
|
42 | 45 | # We pass double the max for total because we currently have |
|
43 | 46 | # to send the bundle twice in the case of a server that |
|
44 | 47 | # requires authentication. Since we can't know until we try |
|
45 | 48 | # once whether authentication will be required, just lie to |
|
46 | 49 | # the user and maybe the push succeeds suddenly at 50%. |
|
47 | self._progress = self.ui.makeprogress( | |
|
50 | return self.ui.makeprogress( | |
|
48 | 51 | _(b'sending'), unit=_(b'kb'), total=(self.length // 1024 * 2) |
|
49 | 52 | ) |
|
50 | 53 |
@@ -985,18 +985,9 b' class imanifestdict(interfaceutil.Interf' | |||
|
985 | 985 | def hasdir(dir): |
|
986 | 986 | """Returns a bool indicating if a directory is in this manifest.""" |
|
987 | 987 | |
|
988 | def matches(match): | |
|
989 | """Generate a new manifest filtered through a matcher. | |
|
990 | ||
|
991 | Returns an object conforming to the ``imanifestdict`` interface. | |
|
992 | """ | |
|
993 | ||
|
994 | 988 | def walk(match): |
|
995 | 989 | """Generator of paths in manifest satisfying a matcher. |
|
996 | 990 | |
|
997 | This is equivalent to ``self.matches(match).iterkeys()`` except a new | |
|
998 | manifest object is not created. | |
|
999 | ||
|
1000 | 991 | If the matcher has explicit files listed and they don't exist in |
|
1001 | 992 | the manifest, ``match.bad()`` is called for each missing file. |
|
1002 | 993 | """ |
@@ -1027,8 +1018,8 b' class imanifestdict(interfaceutil.Interf' | |||
|
1027 | 1018 | def get(path, default=None): |
|
1028 | 1019 | """Obtain the node value for a path or a default value if missing.""" |
|
1029 | 1020 | |
|
1030 | def flags(path, default=b''): | |
|
1031 | """Return the flags value for a path or a default value if missing.""" | |
|
1021 | def flags(path): | |
|
1022 | """Return the flags value for a path (default: empty bytestring).""" | |
|
1032 | 1023 | |
|
1033 | 1024 | def copy(): |
|
1034 | 1025 | """Return a copy of this manifest.""" |
@@ -1061,6 +1052,9 b' class imanifestdict(interfaceutil.Interf' | |||
|
1061 | 1052 | |
|
1062 | 1053 | Returns a 2-tuple containing ``bytearray(self.text())`` and the |
|
1063 | 1054 | delta between ``base`` and this manifest. |
|
1055 | ||
|
1056 | If this manifest implementation can't support ``fastdelta()``, | |
|
1057 | raise ``mercurial.manifest.FastdeltaUnavailable``. | |
|
1064 | 1058 | """ |
|
1065 | 1059 | |
|
1066 | 1060 | |
@@ -1071,14 +1065,6 b' class imanifestrevisionbase(interfaceuti' | |||
|
1071 | 1065 | as part of a larger interface. |
|
1072 | 1066 | """ |
|
1073 | 1067 | |
|
1074 | def new(): | |
|
1075 | """Obtain a new manifest instance. | |
|
1076 | ||
|
1077 | Returns an object conforming to the ``imanifestrevisionwritable`` | |
|
1078 | interface. The instance will be associated with the same | |
|
1079 | ``imanifestlog`` collection as this instance. | |
|
1080 | """ | |
|
1081 | ||
|
1082 | 1068 | def copy(): |
|
1083 | 1069 | """Obtain a copy of this manifest instance. |
|
1084 | 1070 |
@@ -699,6 +699,7 b' def afterhgrcload(ui, wdirvfs, hgvfs, re' | |||
|
699 | 699 | # Map of requirements to list of extensions to load automatically when |
|
700 | 700 | # requirement is present. |
|
701 | 701 | autoextensions = { |
|
702 | b'git': [b'git'], | |
|
702 | 703 | b'largefiles': [b'largefiles'], |
|
703 | 704 | b'lfs': [b'lfs'], |
|
704 | 705 | } |
@@ -932,6 +933,12 b' def resolverevlogstorevfsoptions(ui, req' | |||
|
932 | 933 | |
|
933 | 934 | if ui.configbool(b'experimental', b'rust.index'): |
|
934 | 935 | options[b'rust.index'] = True |
|
936 | if ui.configbool(b'experimental', b'exp-persistent-nodemap'): | |
|
937 | options[b'exp-persistent-nodemap'] = True | |
|
938 | if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'): | |
|
939 | options[b'exp-persistent-nodemap.mmap'] = True | |
|
940 | if ui.configbool(b'devel', b'persistent-nodemap'): | |
|
941 | options[b'devel-force-nodemap'] = True | |
|
935 | 942 | |
|
936 | 943 | return options |
|
937 | 944 | |
@@ -1803,7 +1810,7 b' class localrepository(object):' | |||
|
1803 | 1810 | # map tag name to (node, hist) |
|
1804 | 1811 | alltags = tagsmod.findglobaltags(self.ui, self) |
|
1805 | 1812 | # map tag name to tag type |
|
1806 | tagtypes = dict((tag, b'global') for tag in alltags) | |
|
1813 | tagtypes = {tag: b'global' for tag in alltags} | |
|
1807 | 1814 | |
|
1808 | 1815 | tagsmod.readlocaltags(self.ui, self, alltags, tagtypes) |
|
1809 | 1816 | |
@@ -1816,12 +1823,10 b' class localrepository(object):' | |||
|
1816 | 1823 | if node != nullid: |
|
1817 | 1824 | tags[encoding.tolocal(name)] = node |
|
1818 | 1825 | tags[b'tip'] = self.changelog.tip() |
|
1819 | tagtypes = dict( | |
|
1820 | [ | |
|
1821 | (encoding.tolocal(name), value) | |
|
1826 | tagtypes = { | |
|
1827 | encoding.tolocal(name): value | |
|
1822 | 1828 | for (name, value) in pycompat.iteritems(tagtypes) |
|
1823 | ] | |
|
1824 | ) | |
|
1829 | } | |
|
1825 | 1830 | return (tags, tagtypes) |
|
1826 | 1831 | |
|
1827 | 1832 | def tagtype(self, tagname): |
@@ -2173,7 +2178,8 b' class localrepository(object):' | |||
|
2173 | 2178 | ) |
|
2174 | 2179 | if hook.hashook(repo.ui, b'pretxnclose-phase'): |
|
2175 | 2180 | cl = repo.unfiltered().changelog |
|
2176 | for rev, (old, new) in tr.changes[b'phases'].items(): | |
|
2181 | for revs, (old, new) in tr.changes[b'phases']: | |
|
2182 | for rev in revs: | |
|
2177 | 2183 | args = tr.hookargs.copy() |
|
2178 | 2184 | node = hex(cl.node(rev)) |
|
2179 | 2185 | args.update(phases.preparehookargs(node, old, new)) |
@@ -2226,7 +2232,7 b' class localrepository(object):' | |||
|
2226 | 2232 | ) |
|
2227 | 2233 | tr.changes[b'origrepolen'] = len(self) |
|
2228 | 2234 | tr.changes[b'obsmarkers'] = set() |
|
2229 | tr.changes[b'phases'] = {} | |
|
2235 | tr.changes[b'phases'] = [] | |
|
2230 | 2236 | tr.changes[b'bookmarks'] = {} |
|
2231 | 2237 | |
|
2232 | 2238 | tr.hookargs[b'txnid'] = txnid |
@@ -2260,8 +2266,11 b' class localrepository(object):' | |||
|
2260 | 2266 | |
|
2261 | 2267 | if hook.hashook(repo.ui, b'txnclose-phase'): |
|
2262 | 2268 | cl = repo.unfiltered().changelog |
|
2263 | phasemv = sorted(tr.changes[b'phases'].items()) | |
|
2264 | for rev, (old, new) in phasemv: | |
|
2269 | phasemv = sorted( | |
|
2270 | tr.changes[b'phases'], key=lambda r: r[0][0] | |
|
2271 | ) | |
|
2272 | for revs, (old, new) in phasemv: | |
|
2273 | for rev in revs: | |
|
2265 | 2274 | args = tr.hookargs.copy() |
|
2266 | 2275 | node = hex(cl.node(rev)) |
|
2267 | 2276 | args.update(phases.preparehookargs(node, old, new)) |
@@ -2498,6 +2507,9 b' class localrepository(object):' | |||
|
2498 | 2507 | |
|
2499 | 2508 | if full: |
|
2500 | 2509 | unfi = self.unfiltered() |
|
2510 | ||
|
2511 | self.changelog.update_caches(transaction=tr) | |
|
2512 | ||
|
2501 | 2513 | rbc = unfi.revbranchcache() |
|
2502 | 2514 | for r in unfi.changelog: |
|
2503 | 2515 | rbc.branchinfo(r) |
@@ -2843,6 +2855,14 b' class localrepository(object):' | |||
|
2843 | 2855 | fparent1, fparent2 = fparent2, nullid |
|
2844 | 2856 | elif fparent2 in fparentancestors: |
|
2845 | 2857 | fparent2 = nullid |
|
2858 | elif not fparentancestors: | |
|
2859 | # TODO: this whole if-else might be simplified much more | |
|
2860 | ms = mergemod.mergestate.read(self) | |
|
2861 | if ( | |
|
2862 | fname in ms | |
|
2863 | and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER | |
|
2864 | ): | |
|
2865 | fparent1, fparent2 = fparent2, nullid | |
|
2846 | 2866 | |
|
2847 | 2867 | # is the file changed? |
|
2848 | 2868 | text = fctx.data() |
@@ -2938,6 +2958,9 b' class localrepository(object):' | |||
|
2938 | 2958 | self, status, text, user, date, extra |
|
2939 | 2959 | ) |
|
2940 | 2960 | |
|
2961 | ms = mergemod.mergestate.read(self) | |
|
2962 | mergeutil.checkunresolved(ms) | |
|
2963 | ||
|
2941 | 2964 | # internal config: ui.allowemptycommit |
|
2942 | 2965 | allowemptycommit = ( |
|
2943 | 2966 | wctx.branch() != wctx.p1().branch() |
@@ -2947,14 +2970,13 b' class localrepository(object):' | |||
|
2947 | 2970 | or self.ui.configbool(b'ui', b'allowemptycommit') |
|
2948 | 2971 | ) |
|
2949 | 2972 | if not allowemptycommit: |
|
2973 | self.ui.debug(b'nothing to commit, clearing merge state\n') | |
|
2974 | ms.reset() | |
|
2950 | 2975 | return None |
|
2951 | 2976 | |
|
2952 | 2977 | if merge and cctx.deleted(): |
|
2953 | 2978 | raise error.Abort(_(b"cannot commit merge with missing files")) |
|
2954 | 2979 | |
|
2955 | ms = mergemod.mergestate.read(self) | |
|
2956 | mergeutil.checkunresolved(ms) | |
|
2957 | ||
|
2958 | 2980 | if editor: |
|
2959 | 2981 | cctx._text = editor(self, cctx, subs) |
|
2960 | 2982 | edited = text != cctx._text |
@@ -3572,14 +3594,17 b' def newreporequirements(ui, createopts):' | |||
|
3572 | 3594 | if ui.configbool(b'format', b'dotencode'): |
|
3573 | 3595 | requirements.add(b'dotencode') |
|
3574 | 3596 | |
|
3575 | compengine = ui.config(b'format', b'revlog-compression') | |
|
3576 | if compengine not in util.compengines: | |
|
3597 | compengines = ui.configlist(b'format', b'revlog-compression') | |
|
3598 | for compengine in compengines: | |
|
3599 | if compengine in util.compengines: | |
|
3600 | break | |
|
3601 | else: | |
|
3577 | 3602 | raise error.Abort( |
|
3578 | 3603 | _( |
|
3579 | b'compression engine %s defined by ' | |
|
3604 | b'compression engines %s defined by ' | |
|
3580 | 3605 | b'format.revlog-compression not available' |
|
3581 | 3606 | ) |
|
3582 | % compengine, | |
|
3607 | % b', '.join(b'"%s"' % e for e in compengines), | |
|
3583 | 3608 | hint=_( |
|
3584 | 3609 | b'run "hg debuginstall" to list available ' |
|
3585 | 3610 | b'compression engines' |
@@ -3587,7 +3612,7 b' def newreporequirements(ui, createopts):' | |||
|
3587 | 3612 | ) |
|
3588 | 3613 | |
|
3589 | 3614 | # zlib is the historical default and doesn't need an explicit requirement. |
|
3590 | 3615 | if compengine == b'zstd': |
|
3591 | 3616 | requirements.add(b'revlog-compression-zstd') |
|
3592 | 3617 | elif compengine != b'zlib': |
|
3593 | 3618 | requirements.add(b'exp-compression-%s' % compengine) |
@@ -1004,7 +1004,7 b' def _graphnodeformatter(ui, displayer):' | |||
|
1004 | 1004 | ui, spec, defaults=templatekw.keywords, resources=tres |
|
1005 | 1005 | ) |
|
1006 | 1006 | |
|
1007 | def formatnode(repo, ctx): | |
|
1007 | def formatnode(repo, ctx, cache): | |
|
1008 | 1008 | props = {b'ctx': ctx, b'repo': repo} |
|
1009 | 1009 | return templ.renderdefault(props) |
|
1010 | 1010 | |
@@ -1038,8 +1038,9 b' def displaygraph(ui, repo, dag, displaye' | |||
|
1038 | 1038 | # experimental config: experimental.graphshorten |
|
1039 | 1039 | state.graphshorten = ui.configbool(b'experimental', b'graphshorten') |
|
1040 | 1040 | |
|
1041 | formatnode_cache = {} | |
|
1041 | 1042 | for rev, type, ctx, parents in dag: |
|
1042 | char = formatnode(repo, ctx) | |
|
1043 | char = formatnode(repo, ctx, formatnode_cache) | |
|
1043 | 1044 | copies = getcopies(ctx) if getcopies else None |
|
1044 | 1045 | edges = edgefn(type, char, state, rev, parents) |
|
1045 | 1046 | firstedge = next(edges) |
@@ -23,6 +23,7 b' from .pycompat import getattr' | |||
|
23 | 23 | from . import ( |
|
24 | 24 | encoding, |
|
25 | 25 | error, |
|
26 | match as matchmod, | |
|
26 | 27 | mdiff, |
|
27 | 28 | pathutil, |
|
28 | 29 | policy, |
@@ -56,7 +57,12 b' def _parse(data):' | |||
|
56 | 57 | raise ValueError(b'Manifest lines not in sorted order.') |
|
57 | 58 | prev = l |
|
58 | 59 | f, n = l.split(b'\0') |
|
59 | if len(n) > 40: | |
60 | nl = len(n) | |
|
61 | if 64 < nl: | |
|
62 | # modern hash, full width | |
|
63 | yield f, bin(n[:64]), n[64:] | |
|
64 | elif 40 < nl < 45: | |
|
65 | # legacy hash, always sha1 | |
|
60 | 66 | yield f, bin(n[:40]), n[40:] |
|
61 | 67 | else: |
|
62 | 68 | yield f, bin(n), b'' |
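A worked example (hypothetical values) of the manifest line layout the branches above distinguish: '<path>\0<hex node><flags>\n', where the hex node is 40 characters for sha1 and 64 for its successor, optionally followed by a one-byte flag such as 'x' or 'l':

    line = b"foo/bar.txt\x00" + b"ab" * 20 + b"x"
    f, n = line.split(b"\x00")
    nl = len(n)             # 41: forty hex digits plus one flag byte
    assert 40 < nl < 45     # selects the legacy sha1 branch
    node_hex, flags = n[:40], n[40:]
    assert flags == b"x"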
@@ -264,9 +270,15 b' class _lazymanifest(object):' | |||
|
264 | 270 | if pos == -1: |
|
265 | 271 | return (data[1], data[2]) |
|
266 | 272 | zeropos = data.find(b'\x00', pos) |
|
273 | nlpos = data.find(b'\n', zeropos) | |
|
267 | 274 | assert 0 <= needle <= len(self.positions) |
|
268 | 275 | assert len(self.extrainfo) == len(self.positions) |
|
269 | hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40) | |
|
276 | hlen = nlpos - zeropos - 1 | |
|
277 | # Hashes sometimes have an extra byte tucked on the end, so | |
|
278 | # detect that. | |
|
279 | if hlen % 2: | |
|
280 | hlen -= 1 | |
|
281 | hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen) | |
|
270 | 282 | flags = self._getflags(data, needle, zeropos) |
|
271 | 283 | return (hashval, flags) |
|
272 | 284 | |
@@ -291,8 +303,13 b' class _lazymanifest(object):' | |||
|
291 | 303 | b"Manifest values must be a tuple of (node, flags)." |
|
292 | 304 | ) |
|
293 | 305 | hashval = value[0] |
|
294 | if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22: | |
|
295 | raise TypeError(b"node must be a 20-byte byte string") | |
|
306 | # hashes are either 20 or 32 bytes (sha1 or its replacement), | |
|
307 | # and allow one extra byte that won't be persisted to disk but | |
|
308 | # is sometimes used in memory. | |
|
309 | if not isinstance(hashval, bytes) or not ( | |
|
310 | 20 <= len(hashval) <= 22 or 32 <= len(hashval) <= 34 | |
|
311 | ): | |
|
312 | raise TypeError(b"node must be a 20-byte or 32-byte byte string") | |
|
296 | 313 | flags = value[1] |
|
297 | 314 | if len(hashval) == 22: |
|
298 | 315 | hashval = hashval[:-1] |
@@ -376,8 +393,13 b' class _lazymanifest(object):' | |||
|
376 | 393 | t = self.extradata[-cur - 1] |
|
377 | 394 | l.append(self._pack(t)) |
|
378 | 395 | self.positions[i] = offset |
|
379 | if len(t[1]) > 20: | |
|
380 | self.extrainfo[i] = ord(t[1][21]) | |
|
396 | # Hashes are either 20 bytes (old sha1s) or 32 | |
|
397 | # bytes (new non-sha1). | |
|
398 | hlen = 20 | |
|
399 | if len(t[1]) > 25: | |
|
400 | hlen = 32 | |
|
401 | if len(t[1]) > hlen: | |
|
402 | self.extrainfo[i] = ord(t[1][hlen + 1]) | |
|
381 | 403 | offset += len(l[-1]) |
|
382 | 404 | i += 1 |
|
383 | 405 | self.data = b''.join(l) |
@@ -385,7 +407,11 b' class _lazymanifest(object):' | |||
|
385 | 407 | self.extradata = [] |
|
386 | 408 | |
|
387 | 409 | def _pack(self, d): |
|
388 | return d[0] + b'\x00' + hex(d[1][:20]) + d[2] + b'\n' | |
|
410 | n = d[1] | |
|
411 | if len(n) == 21 or len(n) == 33: | |
|
412 | n = n[:-1] | |
|
413 | assert len(n) == 20 or len(n) == 32 | |
|
414 | return d[0] + b'\x00' + hex(n) + d[2] + b'\n' | |
|
389 | 415 | |
|
390 | 416 | def text(self): |
|
391 | 417 | self._compact() |
@@ -461,7 +487,7 b' class manifestdict(object):' | |||
|
461 | 487 | __bool__ = __nonzero__ |
|
462 | 488 | |
|
463 | 489 | def __setitem__(self, key, node): |
|
464 | self._lm[key] = node, self.flags(key, b'') | |
|
490 | self._lm[key] = node, self.flags(key) | |
|
465 | 491 | |
|
466 | 492 | def __contains__(self, key): |
|
467 | 493 | if key is None: |
@@ -482,17 +508,11 b' class manifestdict(object):' | |||
|
482 | 508 | |
|
483 | 509 | def filesnotin(self, m2, match=None): |
|
484 | 510 | '''Set of files in this manifest that are not in the other''' |
|
485 | if match: | |
|
486 | m1 = self.matches(match) | |
|
487 | m2 = m2.matches(match) | |
488 | return m1.filesnotin(m2) | |
|
489 | diff = self.diff(m2) | |
|
490 | files = set( | |
|
491 | filepath | |
|
492 | for filepath, hashflags in pycompat.iteritems(diff) | |
|
493 | if hashflags[1][0] is None | |
|
494 | ) | |
|
495 | return files | |
|
511 | if match is not None: | |
|
512 | match = matchmod.badmatch(match, lambda path, msg: None) | |
|
513 | sm2 = set(m2.walk(match)) | |
|
514 | return {f for f in self.walk(match) if f not in sm2} | |
|
515 | return {f for f in self if f not in m2} | |
|
496 | 516 | |
|
497 | 517 | @propertycache |
|
498 | 518 | def _dirs(self): |
@@ -531,6 +551,7 b' class manifestdict(object):' | |||
|
531 | 551 | # avoid the entire walk if we're only looking for specific files |
|
532 | 552 | if self._filesfastpath(match): |
|
533 | 553 | for fn in sorted(fset): |
|
554 | if fn in self: | |
|
534 | 555 | yield fn |
|
535 | 556 | return |
|
536 | 557 | |
@@ -549,7 +570,7 b' class manifestdict(object):' | |||
|
549 | 570 | if not self.hasdir(fn): |
|
550 | 571 | match.bad(fn, None) |
|
551 | 572 | |
|
552 | def matches(self, match): | |
|
573 | def _matches(self, match): | |
|
553 | 574 | '''generate a new manifest filtered by the match argument''' |
|
554 | 575 | if match.always(): |
|
555 | 576 | return self.copy() |
@@ -582,8 +603,8 b' class manifestdict(object):' | |||
|
582 | 603 | string. |
|
583 | 604 | ''' |
|
584 | 605 | if match: |
|
585 | m1 = self.matches(match) | |
|
586 | m2 = m2.matches(match) | |
|
606 | m1 = self._matches(match) | |
|
607 | m2 = m2._matches(match) | |
|
587 | 608 | return m1.diff(m2, clean=clean) |
|
588 | 609 | return self._lm.diff(m2._lm, clean) |
|
589 | 610 | |
@@ -596,11 +617,11 b' class manifestdict(object):' | |||
|
596 | 617 | except KeyError: |
|
597 | 618 | return default |
|
598 | 619 | |
|
599 | def flags(self, key, default=b''): | |
|
620 | def flags(self, key): | |
|
600 | 621 | try: |
|
601 | 622 | return self._lm[key][1] |
|
602 | 623 | except KeyError: |
|
603 | return default | |
|
624 | return b'' | |
|
604 | 625 | |
|
605 | 626 | def copy(self): |
|
606 | 627 | c = manifestdict() |
@@ -764,6 +785,7 b' def _splittopdir(f):' | |||
|
764 | 785 | _noop = lambda s: None |
|
765 | 786 | |
|
766 | 787 | |
|
788 | @interfaceutil.implementer(repository.imanifestdict) | |
|
767 | 789 | class treemanifest(object): |
|
768 | 790 | def __init__(self, dir=b'', text=b''): |
|
769 | 791 | self._dir = dir |
@@ -1026,7 +1048,12 b' class treemanifest(object):' | |||
|
1026 | 1048 | self._dirs[dir] = treemanifest(self._subpath(dir)) |
|
1027 | 1049 | self._dirs[dir].__setitem__(subpath, n) |
|
1028 | 1050 | else: |
|
1029 | self._files[f] = n[:21] # to match manifestdict's behavior | |
|
1051 | # manifest nodes are either 20 bytes or 32 bytes, | |
|
1052 | # depending on the hash in use. An extra byte is | |
|
1053 | # occasionally used by hg, but won't ever be | |
|
1054 | # persisted. Trim to 21 or 33 bytes as appropriate. | |
|
1055 | trim = 21 if len(n) < 25 else 33 | |
|
1056 | self._files[f] = n[:trim] # to match manifestdict's behavior | |
|
1030 | 1057 | self._dirty = True |
|
1031 | 1058 | |
|
1032 | 1059 | def _load(self): |
@@ -1079,8 +1106,8 b' class treemanifest(object):' | |||
|
1079 | 1106 | def filesnotin(self, m2, match=None): |
|
1080 | 1107 | '''Set of files in this manifest that are not in the other''' |
|
1081 | 1108 | if match and not match.always(): |
|
1082 | m1 = self.matches(match) | |
|
1083 | m2 = m2.matches(match) | |
|
1109 | m1 = self._matches(match) | |
|
1110 | m2 = m2._matches(match) | |
|
1084 | 1111 | return m1.filesnotin(m2) |
|
1085 | 1112 | |
|
1086 | 1113 | files = set() |
@@ -1126,9 +1153,6 b' class treemanifest(object):' | |||
|
1126 | 1153 | def walk(self, match): |
|
1127 | 1154 | '''Generates matching file names. |
|
1128 | 1155 | |
|
1129 | Equivalent to manifest.matches(match).iterkeys(), but without creating | |
|
1130 | an entirely new manifest. | |
|
1131 | ||
|
1132 | 1156 | It also reports nonexistent files by marking them bad with match.bad(). |
|
1133 | 1157 | ''' |
|
1134 | 1158 | if match.always(): |
@@ -1171,16 +1195,16 b' class treemanifest(object):' | |||
|
1171 | 1195 | for f in self._dirs[p]._walk(match): |
|
1172 | 1196 | yield f |
|
1173 | 1197 | |
|
1174 | def matches(self, match): | |
|
1175 | '''generate a new manifest filtered by the match argument''' | |
|
1176 | if match.always(): | |
|
1177 | return self.copy() | |
|
1178 | ||
|
1179 | return self._matches(match) | |
|
1180 | ||
|
1181 | 1198 | def _matches(self, match): |
|
1182 | 1199 | '''recursively generate a new manifest filtered by the match argument. |
|
1183 | 1200 | ''' |
|
1201 | if match.always(): | |
|
1202 | return self.copy() | |
|
1203 | return self._matches_inner(match) | |
|
1204 | ||
|
1205 | def _matches_inner(self, match): | |
|
1206 | if match.always(): | |
|
1207 | return self.copy() | |
|
1184 | 1208 | |
|
1185 | 1209 | visit = match.visitchildrenset(self._dir[:-1]) |
|
1186 | 1210 | if visit == b'all': |
@@ -1211,7 +1235,7 b' class treemanifest(object):' | |||
|
1211 | 1235 | for dir, subm in pycompat.iteritems(self._dirs): |
|
1212 | 1236 | if visit and dir[:-1] not in visit: |
|
1213 | 1237 | continue |
|
1214 | m = subm._matches(match) | |
|
1238 | m = subm._matches_inner(match) | |
|
1215 | 1239 | if not m._isempty(): |
|
1216 | 1240 | ret._dirs[dir] = m |
|
1217 | 1241 | |
@@ -1219,6 +1243,9 b' class treemanifest(object):' | |||
|
1219 | 1243 | ret._dirty = True |
|
1220 | 1244 | return ret |
|
1221 | 1245 | |
|
1246 | def fastdelta(self, base, changes): | |
|
1247 | raise FastdeltaUnavailable() | |
|
1248 | ||
|
1222 | 1249 | def diff(self, m2, match=None, clean=False): |
|
1223 | 1250 | '''Finds changes between the current manifest and m2. |
|
1224 | 1251 | |
@@ -1235,8 +1262,8 b' class treemanifest(object):' | |||
|
1235 | 1262 | string. |
|
1236 | 1263 | ''' |
|
1237 | 1264 | if match and not match.always(): |
|
1238 | m1 = self.matches(match) | |
|
1239 | m2 = m2.matches(match) | |
|
1265 | m1 = self._matches(match) | |
|
1266 | m2 = m2._matches(match) | |
|
1240 | 1267 | return m1.diff(m2, clean=clean) |
|
1241 | 1268 | result = {} |
|
1242 | 1269 | emptytree = treemanifest() |
@@ -1405,6 +1432,7 b' class manifestfulltextcache(util.lrucach' | |||
|
1405 | 1432 | set = super(manifestfulltextcache, self).__setitem__ |
|
1406 | 1433 | # ignore trailing data, this is a cache, corruption is skipped |
|
1407 | 1434 | while True: |
|
1435 | # TODO do we need to do work here for sha1 portability? | |
|
1408 | 1436 | node = fp.read(20) |
|
1409 | 1437 | if len(node) < 20: |
|
1410 | 1438 | break |
@@ -1495,6 +1523,10 b' class manifestfulltextcache(util.lrucach' | |||
|
1495 | 1523 | MAXCOMPRESSION = 3 |
|
1496 | 1524 | |
|
1497 | 1525 | |
|
1526 | class FastdeltaUnavailable(Exception): | |
|
1527 | """Exception raised when fastdelta isn't usable on a manifest.""" | |
|
1528 | ||
|
1529 | ||
|
1498 | 1530 | @interfaceutil.implementer(repository.imanifeststorage) |
|
1499 | 1531 | class manifestrevlog(object): |
|
1500 | 1532 | '''A revlog that stores manifest texts. This is responsible for caching the |
@@ -1621,7 +1653,9 b' class manifestrevlog(object):' | |||
|
1621 | 1653 | readtree=None, |
|
1622 | 1654 | match=None, |
|
1623 | 1655 | ): |
|
1624 | if p1 in self.fulltextcache and util.safehasattr(m, b'fastdelta'): | |
|
1656 | try: | |
|
1657 | if p1 not in self.fulltextcache: | |
|
1658 | raise FastdeltaUnavailable() | |
|
1625 | 1659 | # If our first parent is in the manifest cache, we can |
|
1626 | 1660 | # compute a delta here using properties we know about the |
|
1627 | 1661 | # manifest up-front, which may save time later for the |
@@ -1640,11 +1674,12 b' class manifestrevlog(object):' | |||
|
1640 | 1674 | n = self._revlog.addrevision( |
|
1641 | 1675 | text, transaction, link, p1, p2, cachedelta |
|
1642 | 1676 | ) |
|
1643 | else: | |
|
1644 | # The first parent manifest isn't already loaded, so we'll | |
|
1645 | # just encode a fulltext of the manifest and pass that | |
|
1646 | # through to the revlog layer, and let it handle the delta | |
|
1647 | # process. | |
|
1677 | except FastdeltaUnavailable: | |
|
1678 | # The first parent manifest isn't already loaded or the | |
|
1679 | # manifest implementation doesn't support fastdelta, so | |
|
1680 | # we'll just encode a fulltext of the manifest and pass | |
|
1681 | # that through to the revlog layer, and let it handle the | |
|
1682 | # delta process. | |
|
1648 | 1683 | if self._treeondisk: |
|
1649 | 1684 | assert readtree, b"readtree must be set for treemanifest writes" |
|
1650 | 1685 | assert match, b"match must be specified for treemanifest writes" |
@@ -1923,9 +1958,6 b' class memmanifestctx(object):' | |||
|
1923 | 1958 | def _storage(self): |
|
1924 | 1959 | return self._manifestlog.getstorage(b'') |
|
1925 | 1960 | |
|
1926 | def new(self): | |
|
1927 | return memmanifestctx(self._manifestlog) | |
|
1928 | ||
|
1929 | 1961 | def copy(self): |
|
1930 | 1962 | memmf = memmanifestctx(self._manifestlog) |
|
1931 | 1963 | memmf._manifestdict = self.read().copy() |
@@ -1972,9 +2004,6 b' class manifestctx(object):' | |||
|
1972 | 2004 | def node(self): |
|
1973 | 2005 | return self._node |
|
1974 | 2006 | |
|
1975 | def new(self): | |
|
1976 | return memmanifestctx(self._manifestlog) | |
|
1977 | ||
|
1978 | 2007 | def copy(self): |
|
1979 | 2008 | memmf = memmanifestctx(self._manifestlog) |
|
1980 | 2009 | memmf._manifestdict = self.read().copy() |
@@ -2039,9 +2068,6 b' class memtreemanifestctx(object):' | |||
|
2039 | 2068 | def _storage(self): |
|
2040 | 2069 | return self._manifestlog.getstorage(b'') |
|
2041 | 2070 | |
|
2042 | def new(self, dir=b''): | |
|
2043 | return memtreemanifestctx(self._manifestlog, dir=dir) | |
|
2044 | ||
|
2045 | 2071 | def copy(self): |
|
2046 | 2072 | memmf = memtreemanifestctx(self._manifestlog, dir=self._dir) |
|
2047 | 2073 | memmf._treemanifest = self._treemanifest.copy() |
@@ -2124,9 +2150,6 b' class treemanifestctx(object):' | |||
|
2124 | 2150 | def node(self): |
|
2125 | 2151 | return self._node |
|
2126 | 2152 | |
|
2127 | def new(self, dir=b''): | |
|
2128 | return memtreemanifestctx(self._manifestlog, dir=dir) | |
|
2129 | ||
|
2130 | 2153 | def copy(self): |
|
2131 | 2154 | memmf = memtreemanifestctx(self._manifestlog, dir=self._dir) |
|
2132 | 2155 | memmf._treemanifest = self.read().copy() |
@@ -24,7 +24,7 b' from . import (' | |||
|
24 | 24 | ) |
|
25 | 25 | from .utils import stringutil |
|
26 | 26 | |
|
27 | rustmod = policy.importrust('filepatterns') | |
|
27 | rustmod = policy.importrust('dirstate') | |
|
28 | 28 | |
|
29 | 29 | allpatternkinds = ( |
|
30 | 30 | b're', |
@@ -666,7 +666,10 b' class _dirchildren(object):' | |||
|
666 | 666 | class includematcher(basematcher): |
|
667 | 667 | def __init__(self, root, kindpats, badfn=None): |
|
668 | 668 | super(includematcher, self).__init__(badfn) |
|
669 | ||
|
669 | if rustmod is not None: | |
|
670 | # We need to pass the patterns to Rust because they can contain | |
|
671 | # patterns from the user interface | |
|
672 | self._kindpats = kindpats | |
|
670 | 673 | self._pats, self.matchfn = _buildmatch(kindpats, b'(?:/|$)', root) |
|
671 | 674 | self._prefix = _prefix(kindpats) |
|
672 | 675 | roots, dirs, parents = _rootsdirsandparents(kindpats) |
@@ -772,7 +775,7 b' class exactmatcher(basematcher):' | |||
|
772 | 775 | candidates = self._fileset | self._dirs - {b''} |
|
773 | 776 | if dir != b'': |
|
774 | 777 | d = dir + b'/' |
|
775 | candidates = set(c[len(d) :] for c in candidates if c.startswith(d)) | |

778 | candidates = {c[len(d) :] for c in candidates if c.startswith(d)} | |
|
776 | 779 | # self._dirs includes all of the directories, recursively, so if |
|
777 | 780 | # we're attempting to match foo/bar/baz.txt, it'll have '', 'foo', |
|
778 | 781 | # 'foo/bar' in it. Thus we can safely ignore a candidate that has a |
@@ -1273,15 +1276,6 b' def _regex(kind, pat, globsuffix):' | |||
|
1273 | 1276 | '''Convert a (normalized) pattern of any kind into a |
|
1274 | 1277 | regular expression. |
|
1275 | 1278 | globsuffix is appended to the regexp of globs.''' |
|
1276 | ||
|
1277 | if rustmod is not None: | |
|
1278 | try: | |
|
1279 | return rustmod.build_single_regex(kind, pat, globsuffix) | |
|
1280 | except rustmod.PatternError: | |
|
1281 | raise error.ProgrammingError( | |
|
1282 | b'not a regex pattern: %s:%s' % (kind, pat) | |
|
1283 | ) | |
|
1284 | ||
|
1285 | 1279 | if not pat and kind in (b'glob', b'relpath'): |
|
1286 | 1280 | return b'' |
|
1287 | 1281 | if kind == b're': |
@@ -1554,18 +1548,6 b' def readpatternfile(filepath, warn, sour' | |||
|
1554 | 1548 | This is useful to debug ignore patterns. |
|
1555 | 1549 | ''' |
|
1556 | 1550 | |
|
1557 | if rustmod is not None: | |
|
1558 | result, warnings = rustmod.read_pattern_file( | |
|
1559 | filepath, bool(warn), sourceinfo, | |
|
1560 | ) | |
|
1561 | ||
|
1562 | for warning_params in warnings: | |
|
1563 | # Can't be easily emitted from Rust, because it would require | |
|
1564 | # a mechanism for both gettext and calling the `warn` function. | |
|
1565 | warn(_(b"%s: ignoring invalid syntax '%s'\n") % warning_params) | |
|
1566 | ||
|
1567 | return result | |
|
1568 | ||
|
1569 | 1551 | syntaxes = { |
|
1570 | 1552 | b're': b'relre:', |
|
1571 | 1553 | b'regexp': b'relre:', |
@@ -91,7 +91,7 b' class diffopts(object):' | |||
|
91 | 91 | ) |
|
92 | 92 | |
|
93 | 93 | def copy(self, **kwargs): |
|
94 | opts = dict((k, getattr(self, k)) for k in self.defaults) | |

94 | opts = {k: getattr(self, k) for k in self.defaults} | |
|
95 | 95 | opts = pycompat.strkwargs(opts) |
|
96 | 96 | opts.update(kwargs) |
|
97 | 97 | return diffopts(**opts) |
@@ -64,6 +64,7 b" RECORD_LABELS = b'l'" | |||
|
64 | 64 | RECORD_OVERRIDE = b't' |
|
65 | 65 | RECORD_UNSUPPORTED_MANDATORY = b'X' |
|
66 | 66 | RECORD_UNSUPPORTED_ADVISORY = b'x' |
|
67 | RECORD_RESOLVED_OTHER = b'R' | |
|
67 | 68 | |
|
68 | 69 | MERGE_DRIVER_STATE_UNMARKED = b'u' |
|
69 | 70 | MERGE_DRIVER_STATE_MARKED = b'm' |
@@ -74,6 +75,9 b" MERGE_RECORD_RESOLVED = b'r'" | |||
|
74 | 75 | MERGE_RECORD_UNRESOLVED_PATH = b'pu' |
|
75 | 76 | MERGE_RECORD_RESOLVED_PATH = b'pr' |
|
76 | 77 | MERGE_RECORD_DRIVER_RESOLVED = b'd' |
|
78 | # represents that the file was automatically merged in favor | |
|
79 | # of the other version. This info is used on commit. | |
|
80 | MERGE_RECORD_MERGED_OTHER = b'o' | |
|
77 | 81 | |
|
78 | 82 | ACTION_FORGET = b'f' |
|
79 | 83 | ACTION_REMOVE = b'r' |
@@ -91,6 +95,8 b" ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'" | |||
|
91 | 95 | ACTION_KEEP = b'k' |
|
92 | 96 | ACTION_EXEC = b'e' |
|
93 | 97 | ACTION_CREATED_MERGE = b'cm' |
|
98 | # GET the other/remote side and store this info in mergestate | |
|
99 | ACTION_GET_OTHER_AND_STORE = b'gs' | |
|
94 | 100 | |
|
95 | 101 | |
|
96 | 102 | class mergestate(object): |
@@ -227,6 +233,7 b' class mergestate(object):' | |||
|
227 | 233 | RECORD_CHANGEDELETE_CONFLICT, |
|
228 | 234 | RECORD_PATH_CONFLICT, |
|
229 | 235 | RECORD_MERGE_DRIVER_MERGE, |
|
236 | RECORD_RESOLVED_OTHER, | |
|
230 | 237 | ): |
|
231 | 238 | bits = record.split(b'\0') |
|
232 | 239 | self._state[bits[0]] = bits[1:] |
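
The RECORD_RESOLVED_OTHER records parsed above use the same wire shape as every other mergestate record: a one-byte type plus a NUL-separated payload whose first field is the filename. A minimal standalone sketch of that round-trip (simplified; the real serialization lives in the surrounding mergestate code):

    # Hedged sketch: payloads are NUL-joined fields, filename first.
    RECORD_RESOLVED_OTHER = b'R'
    MERGE_RECORD_MERGED_OTHER = b'o'

    def encode_record(filename, state_fields):
        return (RECORD_RESOLVED_OTHER, b'\0'.join([filename] + state_fields))

    def decode_record(payload):
        bits = payload.split(b'\0')
        return bits[0], bits[1:]  # filename -> remaining state fields

    rtype, payload = encode_record(b'a.txt', [MERGE_RECORD_MERGED_OTHER])
    assert decode_record(payload) == (b'a.txt', [MERGE_RECORD_MERGED_OTHER])
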
@@ -386,18 +393,26 b' class mergestate(object):' | |||
|
386 | 393 | return configmergedriver |
|
387 | 394 | |
|
388 | 395 | @util.propertycache |
|
389 | def localctx(self): | |

396 | def local(self): | |
|
390 | 397 | if self._local is None: |
|
391 | msg = b"localctx accessed but self._local isn't set" | |

398 | msg = b"local accessed but self._local isn't set" | |
|
392 | 399 | raise error.ProgrammingError(msg) |
|
393 | return self._repo[self._local] | |

400 | return self._local | |
|
401 | ||
|
402 | @util.propertycache | |
|
403 | def localctx(self): | |
|
404 | return self._repo[self.local] | |
|
405 | ||
|
406 | @util.propertycache | |
|
407 | def other(self): | |
|
408 | if self._other is None: | |
|
409 | msg = b"other accessed but self._other isn't set" | |
|
410 | raise error.ProgrammingError(msg) | |
|
411 | return self._other | |
|
394 | 412 | |
|
395 | 413 | @util.propertycache |
|
396 | 414 | def otherctx(self): |
|
397 | if self._other is None: | |
|
398 | msg = b"otherctx accessed but self._other isn't set" | |
|
399 | raise error.ProgrammingError(msg) | |
|
400 | return self._repo[self._other] | |
|
415 | return self._repo[self.other] | |
|
401 | 416 | |
|
402 | 417 | def active(self): |
|
403 | 418 | """Whether mergestate is active. |
@@ -405,14 +420,7 b' class mergestate(object):' | |||
|
405 | 420 | Returns True if there appears to be mergestate. This is a rough proxy |
|
406 | 421 | for "is a merge in progress." |
|
407 | 422 | """ |
|
408 | # Check local variables before looking at filesystem for performance | |
|
409 | # reasons. | |
|
410 | return ( | |
|
411 | bool(self._local) | |
|
412 | or bool(self._state) | |
|
413 | or self._repo.vfs.exists(self.statepathv1) | |
|
414 | or self._repo.vfs.exists(self.statepathv2) | |
|
415 | ) | |
|
423 | return bool(self._local) or bool(self._state) | |
|
416 | 424 | |
|
417 | 425 | def commit(self): |
|
418 | 426 | """Write current state on disk (if necessary)""" |
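
Both the new local/other accessors and the localctx/otherctx wrappers above rely on util.propertycache: compute once on first access, then store the result on the instance so the descriptor is bypassed afterwards. A rough standalone sketch of that pattern (Mercurial's real implementation in mercurial/util.py is more featureful):

    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, type=None):
            value = self.func(obj)
            obj.__dict__[self.name] = value  # shadows the descriptor
            return value

    class demo(object):
        def __init__(self, local):
            self._local = local

        @propertycache
        def local(self):
            if self._local is None:
                raise RuntimeError("local accessed but self._local isn't set")
            return self._local

    d = demo(b'abc123')
    assert d.local == b'abc123'  # computed once, then cached on the instance
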
@@ -452,6 +460,10 b' class mergestate(object):' | |||
|
452 | 460 | records.append( |
|
453 | 461 | (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v)) |
|
454 | 462 | ) |
|
463 | elif v[0] == MERGE_RECORD_MERGED_OTHER: | |
|
464 | records.append( | |
|
465 | (RECORD_RESOLVED_OTHER, b'\0'.join([filename] + v)) | |
|
466 | ) | |
|
455 | 467 | elif v[1] == nullhex or v[6] == nullhex: |
|
456 | 468 | # Change/Delete or Delete/Change conflicts. These are stored in |
|
457 | 469 | # 'C' records. v[1] is the local file, and is nullhex when the |
@@ -550,6 +562,10 b' class mergestate(object):' | |||
|
550 | 562 | self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin] |
|
551 | 563 | self._dirty = True |
|
552 | 564 | |
|
565 | def addmergedother(self, path): | |
|
566 | self._state[path] = [MERGE_RECORD_MERGED_OTHER, nullhex, nullhex] | |
|
567 | self._dirty = True | |
|
568 | ||
|
553 | 569 | def __contains__(self, dfile): |
|
554 | 570 | return dfile in self._state |
|
555 | 571 | |
@@ -593,6 +609,8 b' class mergestate(object):' | |||
|
593 | 609 | """rerun merge process for file path `dfile`""" |
|
594 | 610 | if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED): |
|
595 | 611 | return True, 0 |
|
612 | if self._state[dfile][0] == MERGE_RECORD_MERGED_OTHER: | |
|
613 | return True, 0 | |
|
596 | 614 | stateentry = self._state[dfile] |
|
597 | 615 | state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry |
|
598 | 616 | octx = self._repo[self._other] |
@@ -989,11 +1007,10 b' def _checkcollision(repo, wmf, actions):' | |||
|
989 | 1007 | """ |
|
990 | 1008 | Check for case-folding collisions. |
|
991 | 1009 | """ |
|
992 | ||
|
993 | 1010 | # If the repo is narrowed, filter out files outside the narrowspec. |
|
994 | 1011 | narrowmatch = repo.narrowmatch() |
|
995 | 1012 | if not narrowmatch.always(): |
|
996 | wmf = wmf.matches(narrowmatch) | |

1013 | pmmf = set(wmf.walk(narrowmatch)) | |
|
997 | 1014 | if actions: |
|
998 | 1015 | narrowactions = {} |
|
999 | 1016 | for m, actionsfortype in pycompat.iteritems(actions): |
@@ -1002,7 +1019,7 b' def _checkcollision(repo, wmf, actions):' | |||
|
1002 | 1019 | if narrowmatch(f): |
|
1003 | 1020 | narrowactions[m].append((f, args, msg)) |
|
1004 | 1021 | actions = narrowactions |
|
1005 | ||
|
1022 | else: | |
|
1006 | 1023 | # build provisional merged manifest up |
|
1007 | 1024 | pmmf = set(wmf) |
|
1008 | 1025 | |
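
The check this function implements boils down to: after filtering to the narrow spec, would two files in the provisional merged manifest collide once case is folded? A self-contained illustration of the idea (the real code normalizes with util.normcase and reports the first collision as an error):

    def find_casefold_collisions(paths):
        seen = {}
        collisions = []
        for p in paths:
            folded = p.lower()  # stand-in for util.normcase()
            if folded in seen and seen[folded] != p:
                collisions.append((seen[folded], p))
            else:
                seen[folded] = p
        return collisions

    assert find_casefold_collisions([b'README', b'readme']) == [
        (b'README', b'readme')
    ]
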
@@ -1209,7 +1226,7 b' def _filternarrowactions(narrowmatch, br' | |||
|
1209 | 1226 | narrowed. |
|
1210 | 1227 | """ |
|
1211 | 1228 | nooptypes = {b'k'} # TODO: handle with nonconflicttypes |
|
1212 | nonconflicttypes = set(b'a am c cm f g r e'.split()) | |
|
1229 | nonconflicttypes = set(b'a am c cm f g gs r e'.split()) | |
|
1213 | 1230 | # We mutate the items in the dict during iteration, so iterate |
|
1214 | 1231 | # over a copy. |
|
1215 | 1232 | for f, action in list(actions.items()): |
@@ -1256,17 +1273,19 b' def manifestmerge(' | |||
|
1256 | 1273 | if matcher is not None and matcher.always(): |
|
1257 | 1274 | matcher = None |
|
1258 | 1275 | |
|
1259 | copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {} | |
|
1260 | ||
|
1261 | 1276 | # manifests fetched in order are going to be faster, so prime the caches |
|
1262 | 1277 | [ |
|
1263 | 1278 | x.manifest() |
|
1264 | 1279 | for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev) |
|
1265 | 1280 | ] |
|
1266 | 1281 | |
|
1282 | branch_copies1 = copies.branch_copies() | |
|
1283 | branch_copies2 = copies.branch_copies() | |
|
1284 | diverge = {} | |
|
1267 | 1285 | if followcopies: |
|
1268 | ret = copies.mergecopies(repo, wctx, p2, pa) | |
|
1269 | copy, movewithdir, diverge, renamedelete, dirmove = ret | |
|
1286 | branch_copies1, branch_copies2, diverge = copies.mergecopies( | |
|
1287 | repo, wctx, p2, pa | |
|
1288 | ) | |
|
1270 | 1289 | |
|
1271 | 1290 | boolbm = pycompat.bytestr(bool(branchmerge)) |
|
1272 | 1291 | boolf = pycompat.bytestr(bool(force)) |
@@ -1278,8 +1297,10 b' def manifestmerge(' | |||
|
1278 | 1297 | repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2)) |
|
1279 | 1298 | |
|
1280 | 1299 | m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest() |
|
1281 | copied = set(copy.values()) | |
|
1282 | copied.update(movewithdir.values()) | |
|
1300 | copied1 = set(branch_copies1.copy.values()) | |
|
1301 | copied1.update(branch_copies1.movewithdir.values()) | |
|
1302 | copied2 = set(branch_copies2.copy.values()) | |
|
1303 | copied2.update(branch_copies2.movewithdir.values()) | |
|
1283 | 1304 | |
|
1284 | 1305 | if b'.hgsubstate' in m1 and wctx.rev() is None: |
|
1285 | 1306 | # Check whether sub state is modified, and overwrite the manifest |
@@ -1299,10 +1320,10 b' def manifestmerge(' | |||
|
1299 | 1320 | relevantfiles = set(ma.diff(m2).keys()) |
|
1300 | 1321 | |
|
1301 | 1322 | # For copied and moved files, we need to add the source file too. |
|
1302 | for copykey, copyvalue in pycompat.iteritems(copy): | |
|
1323 | for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy): | |
|
1303 | 1324 | if copyvalue in relevantfiles: |
|
1304 | 1325 | relevantfiles.add(copykey) |
|
1305 | for movedirkey in movewithdir: | |
|
1326 | for movedirkey in branch_copies1.movewithdir: | |
|
1306 | 1327 | relevantfiles.add(movedirkey) |
|
1307 | 1328 | filesmatcher = scmutil.matchfiles(repo, relevantfiles) |
|
1308 | 1329 | matcher = matchmod.intersectmatchers(matcher, filesmatcher) |
@@ -1313,7 +1334,10 b' def manifestmerge(' | |||
|
1313 | 1334 | for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff): |
|
1314 | 1335 | if n1 and n2: # file exists on both local and remote side |
|
1315 | 1336 | if f not in ma: |
|
1316 | fa = copy.get(f, None) | |
|
1337 | # TODO: what if they're renamed from different sources? | |
|
1338 | fa = branch_copies1.copy.get( | |
|
1339 | f, None | |
|
1340 | ) or branch_copies2.copy.get(f, None) | |
|
1317 | 1341 | if fa is not None: |
|
1318 | 1342 | actions[f] = ( |
|
1319 | 1343 | ACTION_MERGE, |
@@ -1341,14 +1365,22 b' def manifestmerge(' | |||
|
1341 | 1365 | ) |
|
1342 | 1366 | else: |
|
1343 | 1367 | actions[f] = ( |
|
1344 | ACTION_GET, | |

1368 | ACTION_GET_OTHER_AND_STORE | |
|
1369 | if branchmerge | |
|
1370 | else ACTION_GET, | |
|
1345 | 1371 | (fl2, False), |
|
1346 | 1372 | b'remote is newer', |
|
1347 | 1373 | ) |
|
1348 | 1374 | elif nol and n2 == a: # remote only changed 'x' |
|
1349 | 1375 | actions[f] = (ACTION_EXEC, (fl2,), b'update permissions') |
|
1350 | 1376 | elif nol and n1 == a: # local only changed 'x' |
|
1351 | actions[f] = (ACTION_GET, (fl1, False), b'remote is newer') | |

1377 | actions[f] = ( | |
|
1378 | ACTION_GET_OTHER_AND_STORE | |
|
1379 | if branchmerge | |
|
1380 | else ACTION_GET, | |
|
1381 | (fl1, False), | |
|
1382 | b'remote is newer', | |
|
1383 | ) | |
|
1352 | 1384 | else: # both changed something |
|
1353 | 1385 | actions[f] = ( |
|
1354 | 1386 | ACTION_MERGE, |
@@ -1356,10 +1388,12 b' def manifestmerge(' | |||
|
1356 | 1388 | b'versions differ', |
|
1357 | 1389 | ) |
|
1358 | 1390 | elif n1: # file exists only on local side |
|
1359 | if f in copied: | |
|
1391 | if f in copied2: | |
|
1360 | 1392 | pass # we'll deal with it on m2 side |
|
1361 | elif f in movewithdir: # directory rename, move local | |
|
1362 | f2 = movewithdir[f] | |

1393 | elif ( | |
|
1394 | f in branch_copies1.movewithdir | |
|
1395 | ): # directory rename, move local | |
|
1396 | f2 = branch_copies1.movewithdir[f] | |
|
1363 | 1397 | if f2 in m2: |
|
1364 | 1398 | actions[f2] = ( |
|
1365 | 1399 | ACTION_MERGE, |
@@ -1372,8 +1406,8 b' def manifestmerge(' | |||
|
1372 | 1406 | (f, fl1), |
|
1373 | 1407 | b'remote directory rename - move from %s' % f, |
|
1374 | 1408 | ) |
|
1375 | elif f in copy: | |
|
1376 | f2 = copy[f] | |
|
1409 | elif f in branch_copies1.copy: | |
|
1410 | f2 = branch_copies1.copy[f] | |
|
1377 | 1411 | actions[f] = ( |
|
1378 | 1412 | ACTION_MERGE, |
|
1379 | 1413 | (f, f2, f2, False, pa.node()), |
@@ -1397,10 +1431,10 b' def manifestmerge(' | |||
|
1397 | 1431 | else: |
|
1398 | 1432 | actions[f] = (ACTION_REMOVE, None, b'other deleted') |
|
1399 | 1433 | elif n2: # file exists only on remote side |
|
1400 | if f in copied: | |
|
1434 | if f in copied1: | |
|
1401 | 1435 | pass # we'll deal with it on m1 side |
|
1402 | elif f in movewithdir: | |
|
1403 | f2 = movewithdir[f] | |
|
1436 | elif f in branch_copies2.movewithdir: | |
|
1437 | f2 = branch_copies2.movewithdir[f] | |
|
1404 | 1438 | if f2 in m1: |
|
1405 | 1439 | actions[f2] = ( |
|
1406 | 1440 | ACTION_MERGE, |
@@ -1413,8 +1447,8 b' def manifestmerge(' | |||
|
1413 | 1447 | (f, fl2), |
|
1414 | 1448 | b'local directory rename - get from %s' % f, |
|
1415 | 1449 | ) |
|
1416 | elif f in copy: | |
|
1417 | f2 = copy[f] | |
|
1450 | elif f in branch_copies2.copy: | |
|
1451 | f2 = branch_copies2.copy[f] | |
|
1418 | 1452 | if f2 in m2: |
|
1419 | 1453 | actions[f] = ( |
|
1420 | 1454 | ACTION_MERGE, |
@@ -1451,10 +1485,10 b' def manifestmerge(' | |||
|
1451 | 1485 | ) |
|
1452 | 1486 | elif n2 != ma[f]: |
|
1453 | 1487 | df = None |
|
1454 | for d in dirmove: | |
|
1488 | for d in branch_copies1.dirmove: | |
|
1455 | 1489 | if f.startswith(d): |
|
1456 | 1490 | # new file added in a directory that was moved |
|
1457 | df = dirmove[d] + f[len(d) :] | |
|
1491 | df = branch_copies1.dirmove[d] + f[len(d) :] | |
|
1458 | 1492 | break |
|
1459 | 1493 | if df is not None and df in m1: |
|
1460 | 1494 | actions[df] = ( |
@@ -1481,6 +1515,9 b' def manifestmerge(' | |||
|
1481 | 1515 | # Updates "actions" in place |
|
1482 | 1516 | _filternarrowactions(narrowmatch, branchmerge, actions) |
|
1483 | 1517 | |
|
1518 | renamedelete = branch_copies1.renamedelete | |
|
1519 | renamedelete.update(branch_copies2.renamedelete) | |
|
1520 | ||
|
1484 | 1521 | return actions, diverge, renamedelete |
|
1485 | 1522 | |
|
1486 | 1523 | |
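
manifestmerge() now receives copy-tracing results as one branch_copies object per side of the merge instead of five loose dicts. A reduced sketch of that grouping (attribute names as used above; the real class lives in mercurial/copies.py and is filled in by mergecopies()):

    class branch_copies(object):
        def __init__(self, copy=None, renamedelete=None, dirmove=None,
                     movewithdir=None):
            self.copy = copy or {}                # dest -> source renames
            self.renamedelete = renamedelete or {}
            self.dirmove = dirmove or {}          # moved directory prefixes
            self.movewithdir = movewithdir or {}  # files dragged along a move

    # one instance per side, as the rewritten manifestmerge() does
    branch_copies1 = branch_copies(copy={b'new': b'old'})
    branch_copies2 = branch_copies()
    copied1 = set(branch_copies1.copy.values())
    copied1.update(branch_copies1.movewithdir.values())
    assert copied1 == {b'old'}
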
@@ -1576,6 +1613,8 b' def calculateupdates(' | |||
|
1576 | 1613 | |
|
1577 | 1614 | for f, a in sorted(pycompat.iteritems(actions)): |
|
1578 | 1615 | m, args, msg = a |
|
1616 | if m == ACTION_GET_OTHER_AND_STORE: | |
|
1617 | m = ACTION_GET | |
|
1579 | 1618 | repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m)) |
|
1580 | 1619 | if f in fbids: |
|
1581 | 1620 | d = fbids[f] |
@@ -1784,8 +1823,8 b' class updateresult(object):' | |||
|
1784 | 1823 | |
|
1785 | 1824 | def emptyactions(): |
|
1786 | 1825 | """create an actions dict, to be populated and passed to applyupdates()""" |
|
1787 | return dict( | |

1788 | (m, []) | |

1826 | return { | |
|
1827 | m: [] | |
|
1789 | 1828 | for m in ( |
|
1790 | 1829 | ACTION_ADD, |
|
1791 | 1830 | ACTION_ADD_MODIFIED, |
@@ -1801,8 +1840,9 b' def emptyactions():' | |||
|
1801 | 1840 | ACTION_KEEP, |
|
1802 | 1841 | ACTION_PATH_CONFLICT, |
|
1803 | 1842 | ACTION_PATH_CONFLICT_RESOLVE, |
|
1843 | ACTION_GET_OTHER_AND_STORE, | |
|
1804 | 1844 | ) |
|
1805 | ) | |

1845 | } | |
|
1806 | 1846 | |
|
1807 | 1847 | |
|
1808 | 1848 | def applyupdates( |
@@ -1823,6 +1863,11 b' def applyupdates(' | |||
|
1823 | 1863 | |
|
1824 | 1864 | updated, merged, removed = 0, 0, 0 |
|
1825 | 1865 | ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels) |
|
1866 | ||
|
1867 | # add ACTION_GET_OTHER_AND_STORE to mergestate | |
|
1868 | for e in actions[ACTION_GET_OTHER_AND_STORE]: | |
|
1869 | ms.addmergedother(e[0]) | |
|
1870 | ||
|
1826 | 1871 | moves = [] |
|
1827 | 1872 | for m, l in actions.items(): |
|
1828 | 1873 | l.sort() |
@@ -2058,7 +2103,7 b' def applyupdates(' | |||
|
2058 | 2103 | |
|
2059 | 2104 | extraactions = ms.actions() |
|
2060 | 2105 | if extraactions: |
|
2061 | mfiles = set(a[0] for a in actions[ACTION_MERGE]) | |

2106 | mfiles = {a[0] for a in actions[ACTION_MERGE]} | |
|
2062 | 2107 | for k, acts in pycompat.iteritems(extraactions): |
|
2063 | 2108 | actions[k].extend(acts) |
|
2064 | 2109 | if k == ACTION_GET and wantfiledata: |
@@ -2205,6 +2250,7 b' def update(' | |||
|
2205 | 2250 | labels=None, |
|
2206 | 2251 | matcher=None, |
|
2207 | 2252 | mergeforce=False, |
|
2253 | updatedirstate=True, | |
|
2208 | 2254 | updatecheck=None, |
|
2209 | 2255 | wc=None, |
|
2210 | 2256 | ): |
@@ -2288,13 +2334,6 b' def update(' | |||
|
2288 | 2334 | ), |
|
2289 | 2335 | ) |
|
2290 | 2336 | ) |
|
2291 | # If we're doing a partial update, we need to skip updating | |
|
2292 | # the dirstate, so make a note of any partial-ness to the | |
|
2293 | # update here. | |
|
2294 | if matcher is None or matcher.always(): | |
|
2295 | partial = False | |
|
2296 | else: | |
|
2297 | partial = True | |
|
2298 | 2337 | with repo.wlock(): |
|
2299 | 2338 | if wc is None: |
|
2300 | 2339 | wc = repo[None] |
@@ -2409,6 +2448,7 b' def update(' | |||
|
2409 | 2448 | ACTION_EXEC, |
|
2410 | 2449 | ACTION_REMOVE, |
|
2411 | 2450 | ACTION_PATH_CONFLICT_RESOLVE, |
|
2451 | ACTION_GET_OTHER_AND_STORE, | |
|
2412 | 2452 | ): |
|
2413 | 2453 | msg = _(b"conflicting changes") |
|
2414 | 2454 | hint = _(b"commit or update --clean to discard changes") |
@@ -2471,6 +2511,10 b' def update(' | |||
|
2471 | 2511 | actions[m] = [] |
|
2472 | 2512 | actions[m].append((f, args, msg)) |
|
2473 | 2513 | |
|
2514 | # ACTION_GET_OTHER_AND_STORE is a ACTION_GET + store in mergestate | |
|
2515 | for e in actions[ACTION_GET_OTHER_AND_STORE]: | |
|
2516 | actions[ACTION_GET].append(e) | |
|
2517 | ||
|
2474 | 2518 | if not util.fscasesensitive(repo.path): |
|
2475 | 2519 | # check collision between files only in p2 for clean update |
|
2476 | 2520 | if not branchmerge and ( |
@@ -2507,7 +2551,11 b' def update(' | |||
|
2507 | 2551 | ### apply phase |
|
2508 | 2552 | if not branchmerge: # just jump to the new rev |
|
2509 | 2553 | fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b'' |
|
2510 | if not partial and not wc.isinmemory(): | |
|
2554 | # If we're doing a partial update, we need to skip updating | |
|
2555 | # the dirstate. | |
|
2556 | always = matcher is None or matcher.always() | |
|
2557 | updatedirstate = updatedirstate and always and not wc.isinmemory() | |
|
2558 | if updatedirstate: | |
|
2511 | 2559 | repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2) |
|
2512 | 2560 | # note that we're in the middle of an update |
|
2513 | 2561 | repo.vfs.write(b'updatestate', p2.hex()) |
@@ -2553,7 +2601,6 b' def update(' | |||
|
2553 | 2601 | ) |
|
2554 | 2602 | ) |
|
2555 | 2603 | |
|
2556 | updatedirstate = not partial and not wc.isinmemory() | |
|
2557 | 2604 | wantfiledata = updatedirstate and not branchmerge |
|
2558 | 2605 | stats, getfiledata = applyupdates( |
|
2559 | 2606 | repo, actions, wc, p2, overwrite, wantfiledata, labels=labels |
@@ -2574,15 +2621,65 b' def update(' | |||
|
2574 | 2621 | if not branchmerge: |
|
2575 | 2622 | sparse.prunetemporaryincludes(repo) |
|
2576 | 2623 | |
|
2577 | if not partial: | |
|
2624 | if updatedirstate: | |
|
2578 | 2625 | repo.hook( |
|
2579 | 2626 | b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount |
|
2580 | 2627 | ) |
|
2581 | 2628 | return stats |
|
2582 | 2629 | |
|
2583 | 2630 | |
|
2631 | def merge(ctx, labels=None, force=False, wc=None): | |
|
2632 | """Merge another topological branch into the working copy. | |
|
2633 | ||
|
2634 | force = whether the merge was run with 'merge --force' (deprecated) | |
|
2635 | """ | |
|
2636 | ||
|
2637 | return update( | |
|
2638 | ctx.repo(), | |
|
2639 | ctx.rev(), | |
|
2640 | labels=labels, | |
|
2641 | branchmerge=True, | |
|
2642 | force=force, | |
|
2643 | mergeforce=force, | |
|
2644 | wc=wc, | |
|
2645 | ) | |
|
2646 | ||
|
2647 | ||
|
2648 | def clean_update(ctx, wc=None): | |
|
2649 | """Do a clean update to the given commit. | |
|
2650 | ||
|
2651 | This involves updating to the commit and discarding any changes in the | |
|
2652 | working copy. | |
|
2653 | """ | |
|
2654 | return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc) | |
|
2655 | ||
|
2656 | ||
|
2657 | def revert_to(ctx, matcher=None, wc=None): | |
|
2658 | """Revert the working copy to the given commit. | |
|
2659 | ||
|
2660 | The working copy will keep its current parent(s) but its content will | |
|
2661 | be the same as in the given commit. | |
|
2662 | """ | |
|
2663 | ||
|
2664 | return update( | |
|
2665 | ctx.repo(), | |
|
2666 | ctx.rev(), | |
|
2667 | branchmerge=False, | |
|
2668 | force=True, | |
|
2669 | updatedirstate=False, | |
|
2670 | matcher=matcher, | |
|
2671 | wc=wc, | |
|
2672 | ) | |
|
2673 | ||
|
2674 | ||
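
The three helpers above are thin wrappers that differ only in the flags they forward to update(). A toy model that makes the combinations explicit (real calls take repo/ctx objects; this sketch substitutes a plain dict so it can run standalone):

    def update(rev, branchmerge=False, force=False, updatedirstate=True):
        return {'rev': rev, 'branchmerge': branchmerge,
                'force': force, 'updatedirstate': updatedirstate}

    def merge(rev, force=False):
        return update(rev, branchmerge=True, force=force)

    def clean_update(rev):
        return update(rev, branchmerge=False, force=True)

    def revert_to(rev):
        # keep the working copy's parents: only the content changes
        return update(rev, branchmerge=False, force=True,
                      updatedirstate=False)

    assert revert_to(5)['updatedirstate'] is False
    assert clean_update(5)['force'] is True
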
|
2584 | 2675 | def graft( |
|
2585 | repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False | |
|
2676 | repo, | |
|
2677 | ctx, | |
|
2678 | base=None, | |
|
2679 | labels=None, | |
|
2680 | keepparent=False, | |
|
2681 | keepconflictparent=False, | |
|
2682 | wctx=None, | |
|
2586 | 2683 | ): |
|
2587 | 2684 | """Do a graft-like merge. |
|
2588 | 2685 | |
@@ -2593,7 +2690,7 b' def graft(' | |||
|
2593 | 2690 | renames/copies appropriately. |
|
2594 | 2691 | |
|
2595 | 2692 | ctx - changeset to rebase |
|
2596 | base - merge base, usually ctx.p1() | |

2693 | base - merge base, or ctx.p1() if not specified | |
|
2597 | 2694 | labels - merge labels eg ['local', 'graft'] |
|
2598 | 2695 | keepparent - keep second parent if any |
|
2599 | 2696 | keepconflictparent - if unresolved, keep parent used for the merge |
@@ -2605,9 +2702,15 b' def graft(' | |||
|
2605 | 2702 | # to copy commits), and 2) informs update that the incoming changes are |
|
2606 | 2703 | # newer than the destination so it doesn't prompt about "remote changed foo |
|
2607 | 2704 | # which local deleted". |
|
2608 | wctx = repo[None] | |
|
2705 | # We also pass mergeancestor=True when base is the same revision as p1. 2) | |
|
2706 | # doesn't matter as there can't possibly be conflicts, but 1) is necessary. | |
|
2707 | wctx = wctx or repo[None] | |
|
2609 | 2708 | pctx = wctx.p1() |
|
2610 | mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node()) | |
|
2709 | base = base or ctx.p1() | |
|
2710 | mergeancestor = ( | |
|
2711 | repo.changelog.isancestor(pctx.node(), ctx.node()) | |
|
2712 | or pctx.rev() == base.rev() | |
|
2713 | ) | |
|
2611 | 2714 | |
|
2612 | 2715 | stats = update( |
|
2613 | 2716 | repo, |
@@ -2617,6 +2720,7 b' def graft(' | |||
|
2617 | 2720 | base.node(), |
|
2618 | 2721 | mergeancestor=mergeancestor, |
|
2619 | 2722 | labels=labels, |
|
2723 | wc=wctx, | |
|
2620 | 2724 | ) |
|
2621 | 2725 | |
|
2622 | 2726 | if keepconflictparent and stats.unresolvedcount: |
@@ -2631,6 +2735,11 b' def graft(' | |||
|
2631 | 2735 | if pother == pctx.node(): |
|
2632 | 2736 | pother = nullid |
|
2633 | 2737 | |
|
2738 | if wctx.isinmemory(): | |
|
2739 | wctx.setparents(pctx.node(), pother) | |
|
2740 | # fix up dirstate for copies and renames | |
|
2741 | copies.graftcopies(wctx, ctx, base) | |
|
2742 | else: | |
|
2634 | 2743 | with repo.dirstate.parentchange(): |
|
2635 | 2744 | repo.setparents(pctx.node(), pother) |
|
2636 | 2745 | repo.dirstate.write(repo.currenttransaction()) |
@@ -2642,6 +2751,7 b' def graft(' | |||
|
2642 | 2751 | def purge( |
|
2643 | 2752 | repo, |
|
2644 | 2753 | matcher, |
|
2754 | unknown=True, | |
|
2645 | 2755 | ignored=False, |
|
2646 | 2756 | removeemptydirs=True, |
|
2647 | 2757 | removefiles=True, |
@@ -2653,7 +2763,9 b' def purge(' | |||
|
2653 | 2763 | ``matcher`` is a matcher configured to scan the working directory - |
|
2654 | 2764 | potentially a subset. |
|
2655 | 2765 | |
|
2656 | ``ignored`` controls whether ignored files should be purged. | |

2766 | ``unknown`` controls whether unknown files should be purged. | |
|
2767 | ||
|
2768 | ``ignored`` controls whether ignored files should be purged. | |
|
2657 | 2769 | |
|
2658 | 2770 | ``removeemptydirs`` controls whether empty directories should be removed. |
|
2659 | 2771 | |
@@ -2690,7 +2802,7 b' def purge(' | |||
|
2690 | 2802 | directories = [] |
|
2691 | 2803 | matcher.traversedir = directories.append |
|
2692 | 2804 | |
|
2693 | status = repo.status(match=matcher, ignored=ignored, unknown=True) | |

2805 | status = repo.status(match=matcher, ignored=ignored, unknown=unknown) | |
|
2694 | 2806 | |
|
2695 | 2807 | if removefiles: |
|
2696 | 2808 | for f in sorted(status.unknown + status.ignored): |
@@ -83,6 +83,9 b' class namespaces(object):' | |||
|
83 | 83 | def __iter__(self): |
|
84 | 84 | return self._names.__iter__() |
|
85 | 85 | |
|
86 | def get(self, namespace, default=None): | |
|
87 | return self._names.get(namespace, default) | |
|
88 | ||
|
86 | 89 | def items(self): |
|
87 | 90 | return pycompat.iteritems(self._names) |
|
88 | 91 |
@@ -233,21 +233,6 b' def restrictpatterns(req_includes, req_e' | |||
|
233 | 233 | :param repo_includes: repo includes |
|
234 | 234 | :param repo_excludes: repo excludes |
|
235 | 235 | :return: include patterns, exclude patterns, and invalid include patterns. |
|
236 | ||
|
237 | >>> restrictpatterns({'f1','f2'}, {}, ['f1'], []) | |
|
238 | (set(['f1']), {}, []) | |
|
239 | >>> restrictpatterns({'f1'}, {}, ['f1','f2'], []) | |
|
240 | (set(['f1']), {}, []) | |
|
241 | >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], []) | |
|
242 | (set(['f1/fc1']), {}, []) | |
|
243 | >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], []) | |
|
244 | ([], set(['path:.']), []) | |
|
245 | >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], []) | |
|
246 | (set(['f2/fc2']), {}, []) | |
|
247 | >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], []) | |
|
248 | ([], set(['path:.']), []) | |
|
249 | >>> restrictpatterns({'f1/$non_exitent_var'}, {}, ['f1','f2'], []) | |
|
250 | (set(['f1/$non_exitent_var']), {}, []) | |
|
251 | 236 | """ |
|
252 | 237 | res_excludes = set(req_excludes) |
|
253 | 238 | res_excludes.update(repo_excludes) |
@@ -939,7 +939,7 b' def _computeobsoleteset(repo):' | |||
|
939 | 939 | getnode = repo.changelog.node |
|
940 | 940 | notpublic = _mutablerevs(repo) |
|
941 | 941 | isobs = repo.obsstore.successors.__contains__ |
|
942 | obs = set(r for r in notpublic if isobs(getnode(r))) | |

942 | obs = {r for r in notpublic if isobs(getnode(r))} | |
|
943 | 943 | return obs |
|
944 | 944 | |
|
945 | 945 | |
@@ -965,7 +965,7 b' def _computeorphanset(repo):' | |||
|
965 | 965 | def _computesuspendedset(repo): |
|
966 | 966 | """the set of obsolete parents with non obsolete descendants""" |
|
967 | 967 | suspended = repo.changelog.ancestors(getrevs(repo, b'orphan')) |
|
968 | return set(r for r in getrevs(repo, b'obsolete') if r in suspended) | |

968 | return {r for r in getrevs(repo, b'obsolete') if r in suspended} | |
|
969 | 969 | |
|
970 | 970 | |
|
971 | 971 | @cachefor(b'extinct') |
@@ -194,7 +194,7 b' def allsuccessors(obsstore, nodes, ignor' | |||
|
194 | 194 | |
|
195 | 195 | def _filterprunes(markers): |
|
196 | 196 | """return a set with no prune markers""" |
|
197 | return set(m for m in markers if m[1]) | |

197 | return {m for m in markers if m[1]} | |
|
198 | 198 | |
|
199 | 199 | |
|
200 | 200 | def exclusivemarkers(repo, nodes): |
@@ -338,12 +338,12 b' def foreground(repo, nodes):' | |||
|
338 | 338 | # compute the whole set of successors or descendants |
|
339 | 339 | while len(foreground) != plen: |
|
340 | 340 | plen = len(foreground) |
|
341 | succs = set(c.node() for c in foreground) | |

341 | succs = {c.node() for c in foreground} | |
|
342 | 342 | mutable = [c.node() for c in foreground if c.mutable()] |
|
343 | 343 | succs.update(allsuccessors(repo.obsstore, mutable)) |
|
344 | 344 | known = (n for n in succs if has_node(n)) |
|
345 | 345 | foreground = set(repo.set(b'%ln::', known)) |
|
346 | return set(c.node() for c in foreground) | |

346 | return {c.node() for c in foreground} | |
|
347 | 347 | |
|
348 | 348 | |
|
349 | 349 | # effectflag field |
@@ -855,11 +855,11 b' def markersusers(markers):' | |||
|
855 | 855 | """ Returns a sorted list of markers users without duplicates |
|
856 | 856 | """ |
|
857 | 857 | markersmeta = [dict(m[3]) for m in markers] |
|
858 | users = set( | |

858 | users = { | |
|
859 | 859 | encoding.tolocal(meta[b'user']) |
|
860 | 860 | for meta in markersmeta |
|
861 | 861 | if meta.get(b'user') |
|
862 | ) | |

862 | } | |
|
863 | 863 | |
|
864 | 864 | return sorted(users) |
|
865 | 865 | |
@@ -868,9 +868,9 b' def markersoperations(markers):' | |||
|
868 | 868 | """ Returns a sorted list of markers operations without duplicates |
|
869 | 869 | """ |
|
870 | 870 | markersmeta = [dict(m[3]) for m in markers] |
|
871 | operations = set( | |

871 | operations = { | |
|
872 | 872 | meta.get(b'operation') for meta in markersmeta if meta.get(b'operation') |
|
873 | ) | |

873 | } | |
|
874 | 874 | |
|
875 | 875 | return sorted(operations) |
|
876 | 876 |
@@ -2888,7 +2888,7 b' def _filepairs(modified, added, removed,' | |||
|
2888 | 2888 | or 'rename' (the latter two only if opts.git is set).''' |
|
2889 | 2889 | gone = set() |
|
2890 | 2890 | |
|
2891 | copyto = dict([(v, k) for k, v in copy.items()]) | |

2891 | copyto = {v: k for k, v in copy.items()} | |
|
2892 | 2892 | |
|
2893 | 2893 | addedset, removedset = set(added), set(removed) |
|
2894 | 2894 |
@@ -84,7 +84,7 b' class pathauditor(object):' | |||
|
84 | 84 | _(b"path contains illegal component: %s") % path |
|
85 | 85 | ) |
|
86 | 86 | if b'.hg' in _lowerclean(path): |
|
87 | lparts = [_lowerclean(p.lower()) for p in parts] | |

87 | lparts = [_lowerclean(p) for p in parts] | |
|
88 | 88 | for p in b'.hg', b'.hg.': |
|
89 | 89 | if p in lparts[1:]: |
|
90 | 90 | pos = lparts.index(p) |
@@ -99,10 +99,11 b' class pathauditor(object):' | |||
|
99 | 99 | |
|
100 | 100 | parts.pop() |
|
101 | 101 | normparts.pop() |
|
102 | prefixes = [] | |
|
103 | 102 | # It's important that we check the path parts starting from the root. |
|
104 | # This means we won't accidentally traverse a symlink into some other | |
|
105 | # filesystem (which is potentially expensive to access). | |
|
103 | # We don't want to add "foo/bar/baz" to auditeddir before checking if | |
|
104 | # there's a "foo/.hg" directory. This also means we won't accidentally | |
|
105 | # traverse a symlink into some other filesystem (which is potentially | |
|
106 | # expensive to access). | |
|
106 | 107 | for i in range(len(parts)): |
|
107 | 108 | prefix = pycompat.ossep.join(parts[: i + 1]) |
|
108 | 109 | normprefix = pycompat.ossep.join(normparts[: i + 1]) |
@@ -110,13 +111,11 b' class pathauditor(object):' | |||
|
110 | 111 | continue |
|
111 | 112 | if self._realfs: |
|
112 | 113 | self._checkfs(prefix, path) |
|
113 | prefixes.append(normprefix) | |
|
114 | if self._cached: | |
|
115 | self.auditeddir.add(normprefix) | |
|
114 | 116 | |
|
115 | 117 | if self._cached: |
|
116 | 118 | self.audited.add(normpath) |
|
117 | # only add prefixes to the cache after checking everything: we don't | |
|
118 | # want to add "foo/bar/baz" before checking if there's a "foo/.hg" | |
|
119 | self.auditeddir.update(prefixes) | |
|
120 | 119 | |
|
121 | 120 | def _checkfs(self, prefix, path): |
|
122 | 121 | """raise exception if a file system backed check fails""" |
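
The reordering above means each prefix is cached immediately after its own check, walking from the root outward, so "foo" and "foo/bar" are vetted before "foo/bar/baz" is ever trusted. A small model of that walk (check() stands in for the filesystem-backed _checkfs):

    def audit_prefixes(parts, check, audited):
        for i in range(len(parts)):
            prefix = b'/'.join(parts[: i + 1])
            if prefix in audited:
                continue
            check(prefix)        # may raise before deeper prefixes are cached
            audited.add(prefix)  # cache right away, per the new behavior

    audited = set()
    audit_prefixes([b'foo', b'bar', b'baz'], lambda p: None, audited)
    assert audited == {b'foo', b'foo/bar', b'foo/bar/baz'}
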
@@ -287,6 +286,9 b' class dirs(object):' | |||
|
287 | 286 | '''a multiset of directory names from a set of file paths''' |
|
288 | 287 | |
|
289 | 288 | def __init__(self, map, skip=None): |
|
289 | ''' | |
|
290 | a dict map indicates a dirstate while a list indicates a manifest | |
|
291 | ''' | |
|
290 | 292 | self._dirs = {} |
|
291 | 293 | addpath = self.addpath |
|
292 | 294 | if isinstance(map, dict) and skip is not None: |
@@ -216,17 +216,101 b' def binarydecode(stream):' | |||
|
216 | 216 | return headsbyphase |
|
217 | 217 | |
|
218 | 218 | |
|
219 | def _sortedrange_insert(data, idx, rev, t): | |
|
220 | merge_before = False | |
|
221 | if idx: | |
|
222 | r1, t1 = data[idx - 1] | |
|
223 | merge_before = r1[-1] + 1 == rev and t1 == t | |
|
224 | merge_after = False | |
|
225 | if idx < len(data): | |
|
226 | r2, t2 = data[idx] | |
|
227 | merge_after = r2[0] == rev + 1 and t2 == t | |
|
228 | ||
|
229 | if merge_before and merge_after: | |
|
230 | data[idx - 1] = (pycompat.xrange(r1[0], r2[-1] + 1), t) | |
|
231 | data.pop(idx) | |
|
232 | elif merge_before: | |
|
233 | data[idx - 1] = (pycompat.xrange(r1[0], rev + 1), t) | |
|
234 | elif merge_after: | |
|
235 | data[idx] = (pycompat.xrange(rev, r2[-1] + 1), t) | |
|
236 | else: | |
|
237 | data.insert(idx, (pycompat.xrange(rev, rev + 1), t)) | |
|
238 | ||
|
239 | ||
|
240 | def _sortedrange_split(data, idx, rev, t): | |
|
241 | r1, t1 = data[idx] | |
|
242 | if t == t1: | |
|
243 | return | |
|
244 | t = (t1[0], t[1]) | |
|
245 | if len(r1) == 1: | |
|
246 | data.pop(idx) | |
|
247 | _sortedrange_insert(data, idx, rev, t) | |
|
248 | elif r1[0] == rev: | |
|
249 | data[idx] = (pycompat.xrange(rev + 1, r1[-1] + 1), t1) | |
|
250 | _sortedrange_insert(data, idx, rev, t) | |
|
251 | elif r1[-1] == rev: | |
|
252 | data[idx] = (pycompat.xrange(r1[0], rev), t1) | |
|
253 | _sortedrange_insert(data, idx + 1, rev, t) | |
|
254 | else: | |
|
255 | data[idx : idx + 1] = [ | |
|
256 | (pycompat.xrange(r1[0], rev), t1), | |
|
257 | (pycompat.xrange(rev, rev + 1), t), | |
|
258 | (pycompat.xrange(rev + 1, r1[-1] + 1), t1), | |
|
259 | ] | |
|
260 | ||
|
261 | ||
|
219 | 262 | def _trackphasechange(data, rev, old, new): |
|
220 | """add a phase move the <data> dictionnary | |

263 | """add a phase move to the <data> list of ranges | |
|
221 | 264 | |
|
222 | 265 | If data is None, nothing happens. |
|
223 | 266 | """ |
|
224 | 267 | if data is None: |
|
225 | 268 | return |
|
226 | existing = data.get(rev) | |
|
227 | if existing is not None: | |
|
228 | old = existing[0] | |
|
229 | data[rev] = (old, new) | |
|
269 | ||
|
270 | # If data is empty, create a one-revision range and done | |
|
271 | if not data: | |
|
272 | data.insert(0, (pycompat.xrange(rev, rev + 1), (old, new))) | |
|
273 | return | |
|
274 | ||
|
275 | low = 0 | |
|
276 | high = len(data) | |
|
277 | t = (old, new) | |
|
278 | while low < high: | |
|
279 | mid = (low + high) // 2 | |
|
280 | revs = data[mid][0] | |
|
281 | ||
|
282 | if rev in revs: | |
|
283 | _sortedrange_split(data, mid, rev, t) | |
|
284 | return | |
|
285 | ||
|
286 | if revs[0] == rev + 1: | |
|
287 | if mid and data[mid - 1][0][-1] == rev: | |
|
288 | _sortedrange_split(data, mid - 1, rev, t) | |
|
289 | else: | |
|
290 | _sortedrange_insert(data, mid, rev, t) | |
|
291 | return | |
|
292 | ||
|
293 | if revs[-1] == rev - 1: | |
|
294 | if mid + 1 < len(data) and data[mid + 1][0][0] == rev: | |
|
295 | _sortedrange_split(data, mid + 1, rev, t) | |
|
296 | else: | |
|
297 | _sortedrange_insert(data, mid + 1, rev, t) | |
|
298 | return | |
|
299 | ||
|
300 | if revs[0] > rev: | |
|
301 | high = mid | |
|
302 | else: | |
|
303 | low = mid + 1 | |
|
304 | ||
|
305 | if low == len(data): | |
|
306 | data.append((pycompat.xrange(rev, rev + 1), t)) | |
|
307 | return | |
|
308 | ||
|
309 | r1, t1 = data[low] | |
|
310 | if r1[0] > rev: | |
|
311 | data.insert(low, (pycompat.xrange(rev, rev + 1), t)) | |
|
312 | else: | |
|
313 | data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t)) | |
|
230 | 314 | |
|
231 | 315 | |
|
232 | 316 | class phasecache(object): |
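
With the helpers above, tr.changes[b'phases'] becomes a sorted list of (range, (old, new)) entries instead of a per-revision dict, so runs of consecutive revisions moving between the same phases collapse into one entry. A worked toy example of the merge-on-insert idea (reduced to the extend-on-the-right case; the real helpers also merge on the left, split ranges, and binary-search for the insertion point):

    def insert_rev(data, rev, t):
        for i, (r, t1) in enumerate(data):
            if t1 == t and r.stop == rev:  # extends this range on the right
                data[i] = (range(r.start, rev + 1), t)
                return
        data.append((range(rev, rev + 1), t))

    data = []
    insert_rev(data, 5, (None, 1))
    insert_rev(data, 6, (None, 1))  # adjacent, same move: merged
    insert_rev(data, 9, (None, 1))  # gap: starts a new entry
    assert data == [(range(5, 7), (None, 1)), (range(9, 10), (None, 1))]
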
@@ -400,8 +484,9 b' class phasecache(object):' | |||
|
400 | 484 | phasetracking = tr.changes[b'phases'] |
|
401 | 485 | torev = repo.changelog.rev |
|
402 | 486 | phase = self.phase |
|
403 | for n in nodes: | |

404 | rev = torev(n) | |

487 | revs = [torev(node) for node in nodes] | |
|
488 | revs.sort() | |
|
489 | for rev in revs: | |
|
405 | 490 | revphase = phase(repo, rev) |
|
406 | 491 | _trackphasechange(phasetracking, rev, None, revphase) |
|
407 | 492 | repo.invalidatevolatilesets() |
@@ -445,10 +530,10 b' class phasecache(object):' | |||
|
445 | 530 | phasetracking, r, self.phase(repo, r), targetphase |
|
446 | 531 | ) |
|
447 | 532 | |
|
448 | roots = set( | |

533 | roots = { | |
|
449 | 534 | ctx.node() |
|
450 | 535 | for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected) |
|
451 | ) | |

536 | } | |
|
452 | 537 | if olds != roots: |
|
453 | 538 | self._updateroots(phase, roots, tr) |
|
454 | 539 | # some roots may need to be declared for lower phases |
@@ -485,7 +570,7 b' class phasecache(object):' | |||
|
485 | 570 | affected -= revs |
|
486 | 571 | else: # public phase |
|
487 | 572 | revs = affected |
|
488 | for r in revs: | |
|
573 | for r in sorted(revs): | |
|
489 | 574 | _trackphasechange(phasetracking, r, phase, targetphase) |
|
490 | 575 | repo.invalidatevolatilesets() |
|
491 | 576 | |
@@ -518,9 +603,7 b' class phasecache(object):' | |||
|
518 | 603 | ] |
|
519 | 604 | updatedroots = repo.set(b'roots(%ln::)', aboveroots) |
|
520 | 605 | |
|
521 | finalroots = set( | |
|
522 | n for n in currentroots if repo[n].rev() < minnewroot | |
|
523 | ) | |
|
606 | finalroots = {n for n in currentroots if repo[n].rev() < minnewroot} | |
|
524 | 607 | finalroots.update(ctx.node() for ctx in updatedroots) |
|
525 | 608 | if finalroots != oldroots: |
|
526 | 609 | self._updateroots(targetphase, finalroots, tr) |
@@ -760,7 +843,7 b' def newheads(repo, heads, roots):' | |||
|
760 | 843 | if not heads or heads == [nullid]: |
|
761 | 844 | return [] |
|
762 | 845 | # The logic operated on revisions, convert arguments early for convenience |
|
763 | new_heads = set(rev(n) for n in heads if n != nullid) | |

846 | new_heads = {rev(n) for n in heads if n != nullid} | |
|
764 | 847 | roots = [rev(n) for n in roots] |
|
765 | 848 | # compute the area we need to remove |
|
766 | 849 | affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads) |
@@ -324,9 +324,8 b' def checklink(path):' | |||
|
324 | 324 | open(fullpath, b'w').close() |
|
325 | 325 | except IOError as inst: |
|
326 | 326 | if ( |
|
327 | inst[0]  # pytype: disable=unsupported-operands | |

328 | == errno.EACCES | |
|
329 | ): | |
|
327 | inst[0] == errno.EACCES | |
|
328 | ): # pytype: disable=unsupported-operands | |
|
330 | 329 | # If we can't write to cachedir, just pretend |
|
331 | 330 | # that the fs is readonly and by association |
|
332 | 331 | # that the fs won't support symlinks. This |
@@ -186,6 +186,7 b' class profile(object):' | |||
|
186 | 186 | self._output = None |
|
187 | 187 | self._fp = None |
|
188 | 188 | self._fpdoclose = True |
|
189 | self._flushfp = None | |
|
189 | 190 | self._profiler = None |
|
190 | 191 | self._enabled = enabled |
|
191 | 192 | self._entered = False |
@@ -246,6 +247,8 b' class profile(object):' | |||
|
246 | 247 | else: |
|
247 | 248 | self._fpdoclose = False |
|
248 | 249 | self._fp = self._ui.ferr |
|
250 | # Ensure we've flushed fout before writing to ferr. | |
|
251 | self._flushfp = self._ui.fout | |
|
249 | 252 | |
|
250 | 253 | if proffn is not None: |
|
251 | 254 | pass |
@@ -265,6 +268,7 b' class profile(object):' | |||
|
265 | 268 | def __exit__(self, exception_type, exception_value, traceback): |
|
266 | 269 | propagate = None |
|
267 | 270 | if self._profiler is not None: |
|
271 | self._uiflush() | |
|
268 | 272 | propagate = self._profiler.__exit__( |
|
269 | 273 | exception_type, exception_value, traceback |
|
270 | 274 | ) |
@@ -280,3 +284,7 b' class profile(object):' | |||
|
280 | 284 | def _closefp(self): |
|
281 | 285 | if self._fpdoclose and self._fp is not None: |
|
282 | 286 | self._fp.close() |
|
287 | ||
|
288 | def _uiflush(self): | |
|
289 | if self._flushfp: | |
|
290 | self._flushfp.flush() |
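
The _flushfp plumbing above exists so the profiler flushes the ui's buffered stdout before writing its report to stderr, keeping the two streams deterministically ordered. In miniature:

    import sys

    fout = sys.stdout                    # stands in for ui.fout
    fout.write('command output\n')
    fout.flush()                         # what the new _uiflush() guarantees
    sys.stderr.write('profile stats\n')  # now reliably ordered after fout
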
@@ -141,6 +141,50 b' class IndexObject(BaseIndexObject):' | |||
|
141 | 141 | self._extra = self._extra[: i - self._lgt] |
|
142 | 142 | |
|
143 | 143 | |
|
144 | class PersistentNodeMapIndexObject(IndexObject): | |
|
145 | """a Debug oriented class to test persistent nodemap | |
|
146 | ||
|
147 | We need a simple python object to test API and higher level behavior. See | |
|
148 | the Rust implementation for more serious usage. This should be used only | |
|
149 | through the dedicated `devel.persistent-nodemap` config. | |
|
150 | """ | |
|
151 | ||
|
152 | def nodemap_data_all(self): | |
|
153 | """Return bytes containing a full serialization of a nodemap | |
|
154 | ||
|
155 | The nodemap should be valid for the full set of revisions in the | |
|
156 | index.""" | |
|
157 | return nodemaputil.persistent_data(self) | |
|
158 | ||
|
159 | def nodemap_data_incremental(self): | |
|
160 | """Return bytes containing an incremental update to persistent nodemap | |
|
161 | ||
|
162 | This contains the data for an append-only update of the data provided | |
|
163 | in the last call to `update_nodemap_data`. | |
|
164 | """ | |
|
165 | if self._nm_root is None: | |
|
166 | return None | |
|
167 | docket = self._nm_docket | |
|
168 | changed, data = nodemaputil.update_persistent_data( | |
|
169 | self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev | |
|
170 | ) | |
|
171 | ||
|
172 | self._nm_root = self._nm_max_idx = self._nm_docket = None | |
|
173 | return docket, changed, data | |
|
174 | ||
|
175 | def update_nodemap_data(self, docket, nm_data): | |
|
176 | """provide full block of persisted binary data for a nodemap | |
|
177 | ||
|
178 | The data are expected to come from disk. See `nodemap_data_all` for a | |
|
179 | producer of such data.""" | |
|
180 | if nm_data is not None: | |
|
181 | self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data) | |
|
182 | if self._nm_root: | |
|
183 | self._nm_docket = docket | |
|
184 | else: | |
|
185 | self._nm_root = self._nm_max_idx = self._nm_docket = None | |
|
186 | ||
|
187 | ||
|
144 | 188 | class InlinedIndexObject(BaseIndexObject): |
|
145 | 189 | def __init__(self, data, inline=0): |
|
146 | 190 | self._data = data |
@@ -188,6 +232,12 b' def parse_index2(data, inline):' | |||
|
188 | 232 | return InlinedIndexObject(data, inline), (0, data) |
|
189 | 233 | |
|
190 | 234 | |
|
235 | def parse_index_devel_nodemap(data, inline): | |
|
236 | """like parse_index2, but always returns a PersistentNodeMapIndexObject | |
|
237 | """ | |
|
238 | return PersistentNodeMapIndexObject(data), None | |
|
239 | ||
|
240 | ||
|
191 | 241 | def parse_dirstate(dmap, copymap, st): |
|
192 | 242 | parents = [st[:20], st[20:40]] |
|
193 | 243 | # dereference fields so they will be local in loop |
@@ -48,7 +48,7 b' Uses:' | |||
|
48 | 48 | different branches |
|
49 | 49 | ''' |
|
50 | 50 | |
|
51 | from __future__ import absolute_import | |

51 | from __future__ import absolute_import | |
|
52 | 52 | |
|
53 | 53 | from .node import nullrev |
|
54 | 54 | from . import ( |
@@ -98,6 +98,7 b' if ispy3:' | |||
|
98 | 98 | import codecs |
|
99 | 99 | import functools |
|
100 | 100 | import io |
|
101 | import locale | |
|
101 | 102 | import struct |
|
102 | 103 | |
|
103 | 104 | if os.name == r'nt' and sys.version_info >= (3, 6): |
@@ -148,15 +149,36 b' if ispy3:' | |||
|
148 | 149 | stdout = sys.stdout.buffer |
|
149 | 150 | stderr = sys.stderr.buffer |
|
150 | 151 | |
|
151 | # Since Python 3 converts argv to wchar_t type by Py_DecodeLocale() on Unix, | |
|
152 | # we can use os.fsencode() to get back bytes argv. | |
|
153 | # | |
|
154 | # https://hg.python.org/cpython/file/v3.5.1/Programs/python.c#l55 | |
|
152 | if getattr(sys, 'argv', None) is not None: | |
|
153 | # On POSIX, the char** argv array is converted to Python str using | |
|
154 | # Py_DecodeLocale(). The inverse of this is Py_EncodeLocale(), which isn't | |
|
155 | # directly callable from Python code. So, we need to emulate it. | |
|
156 | # Py_DecodeLocale() calls mbstowcs() and falls back to mbrtowc() with | |
|
157 | # surrogateescape error handling on failure. These functions take the | |
|
158 | # current system locale into account. So, the inverse operation is to | |
|
159 | # .encode() using the system locale's encoding and using the | |
|
160 | # surrogateescape error handler. The only tricky part here is getting | |
|
161 | # the system encoding correct, since `locale.getlocale()` can return | |
|
162 | # None. We fall back to the filesystem encoding if lookups via `locale` | |
|
163 | # fail, as this seems like a reasonable thing to do. | |
|
155 | 164 | # |
|
156 | # On Windows, the native argv is unicode and is converted to MBCS bytes | |
|
157 | # since we do enable the legacy filesystem encoding. | |
|
158 | if getattr(sys, 'argv', None) is not None: | |
|
159 | sysargv = list(map(os.fsencode, sys.argv)) | |
|
165 | # On Windows, the wchar_t **argv is passed into the interpreter as-is. | |
|
166 | # Like POSIX, we need to emulate what Py_EncodeLocale() would do. But | |
|
167 | # there's an additional wrinkle. What we really want to access is the | |
|
168 | # ANSI codepage representation of the arguments, as this is what | |
|
169 | # `int main()` would receive if Python 3 didn't define `int wmain()` | |
|
170 | # (this is how Python 2 worked). To get that, we encode with the mbcs | |
|
171 | # encoding, which will pass CP_ACP to the underlying Windows API to | |
|
172 | # produce bytes. | |
|
173 | if os.name == r'nt': | |
|
174 | sysargv = [a.encode("mbcs", "ignore") for a in sys.argv] | |
|
175 | else: | |
|
176 | encoding = ( | |
|
177 | locale.getlocale()[1] | |
|
178 | or locale.getdefaultlocale()[1] | |
|
179 | or sys.getfilesystemencoding() | |
|
180 | ) | |
|
181 | sysargv = [a.encode(encoding, "surrogateescape") for a in sys.argv] | |
|
160 | 182 | |
|
161 | 183 | bytechr = struct.Struct('>B').pack |
|
162 | 184 | byterepr = b'%r'.__mod__ |
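
The POSIX branch above leans on the fact that surrogateescape decoding is losslessly reversible with the same codec, which is what makes re-encoding sys.argv safe even for undecodable bytes. A short demonstration of the round-trip the comments describe:

    raw = b'caf\xe9'                                  # latin-1, invalid UTF-8
    decoded = raw.decode('utf-8', 'surrogateescape')  # as the interpreter does
    assert raw == decoded.encode('utf-8', 'surrogateescape')  # lossless
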
@@ -334,7 +356,7 b' if ispy3:' | |||
|
334 | 356 | they can be passed as keyword arguments as dictonaries with bytes keys |
|
335 | 357 | can't be passed as keyword arguments to functions on Python 3. |
|
336 | 358 | """ |
|
337 | dic = dict((k.decode('latin-1'), v) for k, v in dic.items()) | |

359 | dic = {k.decode('latin-1'): v for k, v in dic.items()} | |
|
338 | 360 | return dic |
|
339 | 361 | |
|
340 | 362 | def byteskwargs(dic): |
@@ -342,7 +364,7 b' if ispy3:' | |||
|
342 | 364 | Converts keys of python dictonaries to bytes as they were converted to |
|
343 | 365 | str to pass that dictonary as a keyword argument on Python 3. |
|
344 | 366 | """ |
|
345 | dic = dict((k.encode('latin-1'), v) for k, v in dic.items()) | |

367 | dic = {k.encode('latin-1'): v for k, v in dic.items()} | |
|
346 | 368 | return dic |
|
347 | 369 | |
|
348 | 370 | # TODO: handle shlex.shlex(). |
@@ -351,7 +351,7 b' def _createstripbackup(repo, stripbases,' | |||
|
351 | 351 | def safestriproots(ui, repo, nodes): |
|
352 | 352 | """return list of roots of nodes where descendants are covered by nodes""" |
|
353 | 353 | torev = repo.unfiltered().changelog.rev |
|
354 | revs = set(torev(n) for n in nodes) | |

354 | revs = {torev(n) for n in nodes} | |
|
355 | 355 | # tostrip = wanted - unsafe = wanted - ancestors(orphaned) |
|
356 | 356 | # orphaned = affected - wanted |
|
357 | 357 | # affected = descendants(roots(wanted)) |
@@ -352,6 +352,21 b' class revlogio(object):' | |||
|
352 | 352 | return p |
|
353 | 353 | |
|
354 | 354 | |
|
355 | NodemapRevlogIO = None | |
|
356 | ||
|
357 | if util.safehasattr(parsers, 'parse_index_devel_nodemap'): | |
|
358 | ||
|
359 | class NodemapRevlogIO(revlogio): | |
|
360 | """A debug oriented IO class that returns a PersistentNodeMapIndexObject | |
|
361 | ||
|
362 | The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature. | |
|
363 | """ | |
|
364 | ||
|
365 | def parseindex(self, data, inline): | |
|
366 | index, cache = parsers.parse_index_devel_nodemap(data, inline) | |
|
367 | return index, cache | |
|
368 | ||
|
369 | ||
|
355 | 370 | class rustrevlogio(revlogio): |
|
356 | 371 | def parseindex(self, data, inline): |
|
357 | 372 | index, cache = super(rustrevlogio, self).parseindex(data, inline) |
@@ -407,6 +422,7 b' class revlog(object):' | |||
|
407 | 422 | mmaplargeindex=False, |
|
408 | 423 | censorable=False, |
|
409 | 424 | upperboundcomp=None, |
|
425 | persistentnodemap=False, | |
|
410 | 426 | ): |
|
411 | 427 | """ |
|
412 | 428 | create a revlog object |
@@ -418,6 +434,17 b' class revlog(object):' | |||
|
418 | 434 | self.upperboundcomp = upperboundcomp |
|
419 | 435 | self.indexfile = indexfile |
|
420 | 436 | self.datafile = datafile or (indexfile[:-2] + b".d") |
|
437 | self.nodemap_file = None | |
|
438 | if persistentnodemap: | |
|
439 | if indexfile.endswith(b'.a'): | |
|
440 | pending_path = indexfile[:-4] + b".n.a" | |
|
441 | if opener.exists(pending_path): | |
|
442 | self.nodemap_file = pending_path | |
|
443 | else: | |
|
444 | self.nodemap_file = indexfile[:-4] + b".n" | |
|
445 | else: | |
|
446 | self.nodemap_file = indexfile[:-2] + b".n" | |
|
447 | ||
|
421 | 448 | self.opener = opener |
|
422 | 449 | # When True, indexfile is opened with checkambig=True at writing, to |
|
423 | 450 | # avoid file stat ambiguity. |
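
The branch above derives the sibling nodemap path from the index filename, including the pending ".a" case used mid-transaction. Extracted as a standalone sketch (hypothetical helper name; the behavior mirrors the diff):

    def nodemap_filename(indexfile, pending_exists=False):
        if indexfile.endswith(b'.a'):         # pending (mid-transaction) index
            if pending_exists:
                return indexfile[:-4] + b'.n.a'
            return indexfile[:-4] + b'.n'
        return indexfile[:-2] + b'.n'

    assert nodemap_filename(b'00changelog.i') == b'00changelog.n'
    assert nodemap_filename(b'00changelog.i.a', True) == b'00changelog.n.a'
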
@@ -435,6 +462,7 b' class revlog(object):' | |||
|
435 | 462 | self._maxchainlen = None |
|
436 | 463 | self._deltabothparents = True |
|
437 | 464 | self.index = None |
|
465 | self._nodemap_docket = None | |
|
438 | 466 | # Mapping of partial identifiers to full nodes. |
|
439 | 467 | self._pcache = {} |
|
440 | 468 | # Mapping of revision integer to full node. |
@@ -591,13 +619,42 b' class revlog(object):' | |||
|
591 | 619 | |
|
592 | 620 | self._storedeltachains = True |
|
593 | 621 | |
|
622 | devel_nodemap = ( | |
|
623 | self.nodemap_file | |
|
624 | and opts.get(b'devel-force-nodemap', False) | |
|
625 | and NodemapRevlogIO is not None | |
|
626 | ) | |
|
627 | ||
|
628 | use_rust_index = False | |
|
629 | if rustrevlog is not None: | |
|
630 | if self.nodemap_file is not None: | |
|
631 | use_rust_index = True | |
|
632 | else: | |
|
633 | use_rust_index = self.opener.options.get(b'rust.index') | |
|
634 | ||
|
594 | 635 | self._io = revlogio() |
|
595 | 636 | if self.version == REVLOGV0: |
|
596 | 637 | self._io = revlogoldio() |
|
597 | elif rustrevlog is not None and self.opener.options.get(b'rust.index'): | |
|
638 | elif devel_nodemap: | |
|
639 | self._io = NodemapRevlogIO() | |
|
640 | elif use_rust_index: | |
|
598 | 641 | self._io = rustrevlogio() |
|
599 | 642 | try: |
|
600 | 643 | d = self._io.parseindex(indexdata, self._inline) |
|
644 | index, _chunkcache = d | |
|
645 | use_nodemap = ( | |
|
646 | not self._inline | |
|
647 | and self.nodemap_file is not None | |
|
648 | and util.safehasattr(index, 'update_nodemap_data') | |
|
649 | ) | |
|
650 | if use_nodemap: | |
|
651 | nodemap_data = nodemaputil.persisted_data(self) | |
|
652 | if nodemap_data is not None: | |
|
653 | docket = nodemap_data[0] | |
|
654 | if d[0][docket.tip_rev][7] == docket.tip_node: | |
|
655 | # no changelog tampering | |
|
656 | self._nodemap_docket = docket | |
|
657 | index.update_nodemap_data(*nodemap_data) | |
|
601 | 658 | except (ValueError, IndexError): |
|
602 | 659 | raise error.RevlogError( |
|
603 | 660 | _(b"index %s is corrupted") % self.indexfile |
@@ -708,12 +765,32 b' class revlog(object):' | |||
|
708 | 765 | return False |
|
709 | 766 | return True |
|
710 | 767 | |
|
768 | def update_caches(self, transaction): | |
|
769 | if self.nodemap_file is not None: | |
|
770 | if transaction is None: | |
|
771 | nodemaputil.update_persistent_nodemap(self) | |
|
772 | else: | |
|
773 | nodemaputil.setup_persistent_nodemap(transaction, self) | |
|
774 | ||
|
711 | 775 | def clearcaches(self): |
|
712 | 776 | self._revisioncache = None |
|
713 | 777 | self._chainbasecache.clear() |
|
714 | 778 | self._chunkcache = (0, b'') |
|
715 | 779 | self._pcache = {} |
|
780 | self._nodemap_docket = None | |
|
716 | 781 | self.index.clearcaches() |
|
782 | # The python code is the one responsible for validating the docket, we | |
|
783 | # end up having to refresh it here. | |
|
784 | use_nodemap = ( | |
|
785 | not self._inline | |
|
786 | and self.nodemap_file is not None | |
|
787 | and util.safehasattr(self.index, 'update_nodemap_data') | |
|
788 | ) | |
|
789 | if use_nodemap: | |
|
790 | nodemap_data = nodemaputil.persisted_data(self) | |
|
791 | if nodemap_data is not None: | |
|
792 | self._nodemap_docket = nodemap_data[0] | |
|
793 | self.index.update_nodemap_data(*nodemap_data) | |
|
717 | 794 | |
|
718 | 795 | def rev(self, node): |
|
719 | 796 | try: |
@@ -898,9 +975,6 b' class revlog(object):' | |||
|
898 | 975 | if rustancestor is not None: |
|
899 | 976 | lazyancestors = rustancestor.LazyAncestors |
|
900 | 977 | arg = self.index |
|
901 | elif util.safehasattr(parsers, b'rustlazyancestors'): | |
|
902 | lazyancestors = ancestor.rustlazyancestors | |
|
903 | arg = self.index | |
|
904 | 978 | else: |
|
905 | 979 | lazyancestors = ancestor.lazyancestors |
|
906 | 980 | arg = self._uncheckedparentrevs |
@@ -1239,7 +1313,7 b' class revlog(object):' | |||
|
1239 | 1313 | else: |
|
1240 | 1314 | start = self.rev(start) |
|
1241 | 1315 | |
|
1242 | stoprevs = set(self.rev(n) for n in stop or []) | |

1316 | stoprevs = {self.rev(n) for n in stop or []} | |
|
1243 | 1317 | |
|
1244 | 1318 | revs = dagop.headrevssubset( |
|
1245 | 1319 | self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs |
@@ -1960,6 +2034,7 b' class revlog(object):' | |||
|
1960 | 2034 | # manager |
|
1961 | 2035 | |
|
1962 | 2036 | tr.replace(self.indexfile, trindex * self._io.size) |
|
2037 | nodemaputil.setup_persistent_nodemap(tr, self) | |
|
1963 | 2038 | self._chunkclear() |
|
1964 | 2039 | |
|
1965 | 2040 | def _nodeduplicatecallback(self, transaction, node): |
@@ -2286,6 +2361,7 b' class revlog(object):' | |||
|
2286 | 2361 | ifh.write(data[0]) |
|
2287 | 2362 | ifh.write(data[1]) |
|
2288 | 2363 | self._enforceinlinesize(transaction, ifh) |
|
2364 | nodemaputil.setup_persistent_nodemap(transaction, self) | |
|
2289 | 2365 | |
|
2290 | 2366 | def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None): |
|
2291 | 2367 | """ |
@@ -7,9 +7,622 b'' | |||
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | from .. import error | |
|
10 | ||
|
11 | import errno | |
|
12 | import os | |
|
13 | import re | |
|
14 | import struct | |
|
15 | ||
|
16 | from .. import ( | |
|
17 | error, | |
|
18 | node as nodemod, | |
|
19 | util, | |
|
20 | ) | |
|
11 | 21 | |
|
12 | 22 | |
|
13 | 23 | class NodeMap(dict): |
|
14 | 24 | def __missing__(self, x): |
|
15 | 25 | raise error.RevlogError(b'unknown node: %s' % x) |
|
26 | ||
|
27 | ||
|
28 | def persisted_data(revlog): | |
|
29 | """read the nodemap for a revlog from disk""" | |
|
30 | if revlog.nodemap_file is None: | |
|
31 | return None | |
|
32 | pdata = revlog.opener.tryread(revlog.nodemap_file) | |
|
33 | if not pdata: | |
|
34 | return None | |
|
35 | offset = 0 | |
|
36 | (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size]) | |
|
37 | if version != ONDISK_VERSION: | |
|
38 | return None | |
|
39 | offset += S_VERSION.size | |
|
40 | headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size]) | |
|
41 | uid_size, tip_rev, data_length, data_unused, tip_node_size = headers | |
|
42 | offset += S_HEADER.size | |
|
43 | docket = NodeMapDocket(pdata[offset : offset + uid_size]) | |
|
44 | offset += uid_size | |
|
45 | docket.tip_rev = tip_rev | |
|
46 | docket.tip_node = pdata[offset : offset + tip_node_size] | |
|
47 | docket.data_length = data_length | |
|
48 | docket.data_unused = data_unused | |
|
49 | ||
|
50 | filename = _rawdata_filepath(revlog, docket) | |
|
51 | use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap") | |
|
52 | try: | |
|
53 | with revlog.opener(filename) as fd: | |
|
54 | if use_mmap: | |
|
55 | data = util.buffer(util.mmapread(fd, data_length)) | |
|
56 | else: | |
|
57 | data = fd.read(data_length) | |
|
58 | except OSError as e: | |
|
59 | if e.errno != errno.ENOENT: | |
|
60 | raise | |
|
61 | if len(data) < data_length: | |
|
62 | return None | |
|
63 | return docket, data | |
|
64 | ||
|
65 | ||
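
persisted_data() above reads a small fixed "docket" header before the raw nodemap data. S_VERSION, S_HEADER, ONDISK_VERSION and NodeMapDocket are defined in a part of the patch not shown here, so the struct formats below are illustrative assumptions rather than the actual on-disk layout; the parsing sequence, though, mirrors the function above:

    import struct

    # Assumed formats for illustration only.
    S_VERSION = struct.Struct(">B")
    S_HEADER = struct.Struct(">BQQQQ")

    def parse_docket(pdata):
        offset = 0
        (version,) = S_VERSION.unpack_from(pdata, offset)
        offset += S_VERSION.size
        uid_size, tip_rev, data_length, data_unused, tip_node_size = (
            S_HEADER.unpack_from(pdata, offset))
        offset += S_HEADER.size
        uid = pdata[offset : offset + uid_size]
        offset += uid_size
        tip_node = pdata[offset : offset + tip_node_size]
        return version, uid, tip_rev, tip_node, data_length, data_unused
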
|
66 | def setup_persistent_nodemap(tr, revlog): | |
|
67 | """Install whatever is needed transaction side to persist a nodemap on disk | |
|
68 | ||
|
69 | (only actually persist the nodemap if this is relevant for this revlog) | |
|
70 | """ | |
|
71 | if revlog._inline: | |
|
72 | return # inlined revlogs are too small for this to be relevant | |
|
73 | if revlog.nodemap_file is None: | |
|
74 | return # we do not use persistent_nodemap on this revlog | |
|
75 | ||
|
76 | # this must run after the changelog finalization; callbacks run in id order and "nm-" sorts after the changelog's "cl-" | |
|
77 | callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog.nodemap_file | |
|
78 | if tr.hasfinalize(callback_id): | |
|
79 | return # no need to register again | |
|
80 | tr.addpending( | |
|
81 | callback_id, lambda tr: _persist_nodemap(tr, revlog, pending=True) | |
|
82 | ) | |
|
83 | tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog)) | |
|
84 | ||
|
85 | ||
|
86 | class _NoTransaction(object): | |
|
87 | """transaction like object to update the nodemap outside a transaction | |
|
88 | """ | |
|
89 | ||
|
90 | def __init__(self): | |
|
91 | self._postclose = {} | |
|
92 | ||
|
93 | def addpostclose(self, callback_id, callback_func): | |
|
94 | self._postclose[callback_id] = callback_func | |
|
95 | ||
|
96 | def registertmp(self, *args, **kwargs): | |
|
97 | pass | |
|
98 | ||
|
99 | def addbackup(self, *args, **kwargs): | |
|
100 | pass | |
|
101 | ||
|
102 | def add(self, *args, **kwargs): | |
|
103 | pass | |
|
104 | ||
|
105 | def addabort(self, *args, **kwargs): | |
|
106 | pass | |
|
107 | ||
|
108 | ||
|
109 | def update_persistent_nodemap(revlog): | |
|
110 | """update the persistent nodemap right now | |
|
111 | ||
|
112 | To be used for updating the nodemap on disk outside of a normal transaction | |
|
113 | setup (e.g. `debugupdatecache`). | |
|
114 | """ | |
|
115 | notr = _NoTransaction() | |
|
116 | _persist_nodemap(notr, revlog) | |
|
117 | for k in sorted(notr._postclose): | |
|
118 | notr._postclose[k](None) | |
|
119 | ||
|
120 | ||
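_NoTransaction above is a null object: it accepts the same registration calls as a real transaction so _persist_nodemap() can run unchanged, and update_persistent_nodemap() then drains the post-close callbacks by hand. A tiny self-contained sketch of that flow (abridged copy of the class; the callback is hypothetical):

    class _NoTransaction(object):
        def __init__(self):
            self._postclose = {}

        def addpostclose(self, callback_id, callback_func):
            self._postclose[callback_id] = callback_func

        def addbackup(self, *args, **kwargs):
            pass  # a real transaction would snapshot the file here

    notr = _NoTransaction()
    notr.addbackup(b'00changelog.n')
    notr.addpostclose(b'0-demo', lambda tr: print('post-close ran'))
    for k in sorted(notr._postclose):
        notr._postclose[k](None)  # mirrors update_persistent_nodemap()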
|
121 | def _persist_nodemap(tr, revlog, pending=False): | |
|
122 | """Write nodemap data on disk for a given revlog | |
|
123 | """ | |
|
124 | if getattr(revlog, 'filteredrevs', ()): | |
|
125 | raise error.ProgrammingError( | |
|
126 | "cannot persist nodemap of a filtered changelog" | |
|
127 | ) | |
|
128 | if revlog.nodemap_file is None: | |
|
129 | msg = "calling persist nodemap on a revlog without the feature enableb" | |
|
130 | raise error.ProgrammingError(msg) | |
|
131 | ||
|
132 | can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental") | |
|
133 | ondisk_docket = revlog._nodemap_docket | |
|
134 | feed_data = util.safehasattr(revlog.index, "update_nodemap_data") | |
|
135 | use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap") | |
|
136 | ||
|
137 | data = None | |
|
138 | # first attempt an incremental update of the data | |
|
139 | if can_incremental and ondisk_docket is not None: | |
|
140 | target_docket = revlog._nodemap_docket.copy() | |
|
141 | ( | |
|
142 | src_docket, | |
|
143 | data_changed_count, | |
|
144 | data, | |
|
145 | ) = revlog.index.nodemap_data_incremental() | |
|
146 | new_length = target_docket.data_length + len(data) | |
|
147 | new_unused = target_docket.data_unused + data_changed_count | |
|
148 | if src_docket != target_docket: | |
|
149 | data = None | |
|
150 | elif new_length <= (new_unused * 10): # at least 10% of the data is unused | |
|
151 | data = None | |
|
152 | else: | |
|
153 | datafile = _rawdata_filepath(revlog, target_docket) | |
|
154 | # EXP-TODO: if this is a cache, this should use a cache vfs, not a | |
|
155 | # store vfs | |
|
156 | tr.add(datafile, target_docket.data_length) | |
|
157 | with revlog.opener(datafile, b'r+') as fd: | |
|
158 | fd.seek(target_docket.data_length) | |
|
159 | fd.write(data) | |
|
160 | if feed_data: | |
|
161 | if use_mmap: | |
|
162 | fd.seek(0) | |
|
163 | new_data = fd.read(new_length) | |
|
164 | else: | |
|
165 | fd.flush() | |
|
166 | new_data = util.buffer(util.mmapread(fd, new_length)) | |
|
167 | target_docket.data_length = new_length | |
|
168 | target_docket.data_unused = new_unused | |
|
169 | ||
|
170 | if data is None: | |
|
171 | # otherwise fallback to a full new export | |
|
172 | target_docket = NodeMapDocket() | |
|
173 | datafile = _rawdata_filepath(revlog, target_docket) | |
|
174 | if util.safehasattr(revlog.index, "nodemap_data_all"): | |
|
175 | data = revlog.index.nodemap_data_all() | |
|
176 | else: | |
|
177 | data = persistent_data(revlog.index) | |
|
178 | # EXP-TODO: if this is a cache, this should use a cache vfs, not a | |
|
179 | # store vfs | |
|
180 | ||
|
181 | tryunlink = revlog.opener.tryunlink | |
|
182 | ||
|
183 | def abortck(tr): | |
|
184 | tryunlink(datafile) | |
|
185 | ||
|
186 | callback_id = b"delete-%s" % datafile | |
|
187 | ||
|
188 | # some flavors of transaction abort do not clean up new files; they | |

189 | # simply empty them. | |
|
190 | tr.addabort(callback_id, abortck) | |
|
191 | with revlog.opener(datafile, b'w+') as fd: | |
|
192 | fd.write(data) | |
|
193 | if feed_data: | |
|
194 | if use_mmap: | |
|
195 | new_data = data | |
|
196 | else: | |
|
197 | fd.flush() | |
|
198 | new_data = util.buffer(util.mmapread(fd, len(data))) | |
|
199 | target_docket.data_length = len(data) | |
|
200 | target_docket.tip_rev = revlog.tiprev() | |
|
201 | target_docket.tip_node = revlog.node(target_docket.tip_rev) | |
|
202 | # EXP-TODO: if this is a cache, this should use a cache vfs, not a | |
|
203 | # store vfs | |
|
204 | file_path = revlog.nodemap_file | |
|
205 | if pending: | |
|
206 | file_path += b'.a' | |
|
207 | tr.registertmp(file_path) | |
|
208 | else: | |
|
209 | tr.addbackup(file_path) | |
|
210 | ||
|
211 | with revlog.opener(file_path, b'w', atomictemp=True) as fp: | |
|
212 | fp.write(target_docket.serialize()) | |
|
213 | revlog._nodemap_docket = target_docket | |
|
214 | if feed_data: | |
|
215 | revlog.index.update_nodemap_data(target_docket, new_data) | |
|
216 | ||
|
217 | # search for old data files in all cases; some older process might have | |
|
218 | # left one behind. | |
|
219 | olds = _other_rawdata_filepath(revlog, target_docket) | |
|
220 | if olds: | |
|
221 | realvfs = getattr(revlog, '_realopener', revlog.opener) | |
|
222 | ||
|
223 | def cleanup(tr): | |
|
224 | for oldfile in olds: | |
|
225 | realvfs.tryunlink(oldfile) | |
|
226 | ||
|
227 | callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file | |
|
228 | tr.addpostclose(callback_id, cleanup) | |
|
229 | ||
|
230 | ||
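A worked example of the incremental-update heuristic in _persist_nodemap() above (numbers are illustrative only): appending keeps the old bytes around, so once at least 10% of the on-disk data is dead, the code gives up on appending and rewrites the file.

    # after an incremental append: 1100 bytes on disk, 120 of them dead
    new_length, new_unused = 1100, 120
    if new_length <= new_unused * 10:  # 1100 <= 1200: too much waste
        data = None                    # fall back to a full rewrite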
|
231 | ### Nodemap docket file | |
|
232 | # | |
|
233 | # The nodemap data are stored on disk using 2 files: | |
|
234 | # | |
|
235 | # * a raw data file containing a persistent nodemap | |
|
236 | # (see `Nodemap Trie` section) | |
|
237 | # | |
|
238 | # * a small "docket" file containing metadata | |
|
239 | # | |
|
240 | # While the nodemap data can be multiple tens of megabytes, the "docket" is | |

241 | # small, so it is easy to update it automatically or to duplicate its content | |
|
242 | # during a transaction. | |
|
243 | # | |
|
244 | # Multiple raw data files can exist at the same time (the currently valid one | |

245 | # and a new one being used by an in-progress transaction). To accommodate this, | |

246 | # the filename hosting the raw data has a variable part. The exact filename is | |
|
247 | # specified inside the "docket" file. | |
|
248 | # | |
|
249 | # The docket file contains information to find, qualify and validate the raw | |
|
250 | # data. Its content is currently very light, but it will expand as the on disk | |
|
251 | # nodemap gains the necessary features to be used in production. | |
|
252 | ||
|
253 | # version 0 is experimental, no BC guarantee, do not use outside of tests. | |
|
254 | ONDISK_VERSION = 0 | |
|
255 | S_VERSION = struct.Struct(">B") | |
|
256 | S_HEADER = struct.Struct(">BQQQQ") | |
|
257 | ||
|
258 | ID_SIZE = 8 | |
|
259 | ||
|
260 | ||
|
261 | def _make_uid(): | |
|
262 | """return a new unique identifier. | |
|
263 | ||
|
264 | The identifier is random and composed of ascii characters.""" | |
|
265 | return nodemod.hex(os.urandom(ID_SIZE)) | |
|
266 | ||
|
267 | ||
|
268 | class NodeMapDocket(object): | |
|
269 | """metadata associated with persistent nodemap data | |
|
270 | ||
|
271 | The persistent data may come from disk or be on their way to disk. | |
|
272 | """ | |
|
273 | ||
|
274 | def __init__(self, uid=None): | |
|
275 | if uid is None: | |
|
276 | uid = _make_uid() | |
|
277 | # a unique identifier for the data file: | |
|
278 | # - When new data are appended, it is preserved. | |
|
279 | # - When a new data file is created, a new identifier is generated. | |
|
280 | self.uid = uid | |
|
281 | # the tipmost revision stored in the data file. This revision and all | |
|
282 | # revisions before it are expected to be encoded in the data file. | |
|
283 | self.tip_rev = None | |
|
284 | # the node of that tipmost revision; if it mismatches the current index | |
|
285 | # data the docket is not valid for the current index and should be | |
|
286 | # discarded. | |
|
287 | # | |
|
288 | # note: this check is not perfect as some destructive operations could | |

289 | # preserve the same tip_rev + tip_node while altering lower revisions. | |

290 | # However, multiple other caches have the same vulnerability (e.g. the | |

291 | # branchmap cache). | |
|
292 | self.tip_node = None | |
|
293 | # the size (in bytes) of the persisted data to encode the nodemap valid | |
|
294 | # for `tip_rev`. | |
|
295 | # - a data file shorter than this is corrupted, | |
|
296 | # - any extra data should be ignored. | |
|
297 | self.data_length = None | |
|
298 | # the amount (in bytes) of "dead" data, still in the data file but no | |
|
299 | # longer used for the nodemap. | |
|
300 | self.data_unused = 0 | |
|
301 | ||
|
302 | def copy(self): | |
|
303 | new = NodeMapDocket(uid=self.uid) | |
|
304 | new.tip_rev = self.tip_rev | |
|
305 | new.tip_node = self.tip_node | |
|
306 | new.data_length = self.data_length | |
|
307 | new.data_unused = self.data_unused | |
|
308 | return new | |
|
309 | ||
|
310 | def __cmp__(self, other): | |
|
311 | if self.uid < other.uid: | |
|
312 | return -1 | |
|
313 | if self.uid > other.uid: | |
|
314 | return 1 | |
|
315 | elif self.data_length < other.data_length: | |
|
316 | return -1 | |
|
317 | elif self.data_length > other.data_length: | |
|
318 | return 1 | |
|
319 | return 0 | |
|
320 | ||
|
321 | def __eq__(self, other): | |
|
322 | return self.uid == other.uid and self.data_length == other.data_length | |
|
323 | ||
|
324 | def serialize(self): | |
|
325 | """return serialized bytes for a docket using the passed uid""" | |
|
326 | data = [] | |
|
327 | data.append(S_VERSION.pack(ONDISK_VERSION)) | |
|
328 | headers = ( | |
|
329 | len(self.uid), | |
|
330 | self.tip_rev, | |
|
331 | self.data_length, | |
|
332 | self.data_unused, | |
|
333 | len(self.tip_node), | |
|
334 | ) | |
|
335 | data.append(S_HEADER.pack(*headers)) | |
|
336 | data.append(self.uid) | |
|
337 | data.append(self.tip_node) | |
|
338 | return b''.join(data) | |
|
339 | ||
|
340 | ||
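To make the docket layout concrete, here is a standalone round-trip over the same structs; the values are made up for illustration, but the field order matches NodeMapDocket.serialize() above and the decoding matches persisted_data():

    import struct

    S_VERSION = struct.Struct(">B")
    S_HEADER = struct.Struct(">BQQQQ")

    uid = b"0011223344556677"  # hex of ID_SIZE random bytes
    tip_node = b"\x00" * 20
    docket = (
        S_VERSION.pack(0)
        + S_HEADER.pack(len(uid), 4242, 65536, 128, len(tip_node))
        + uid
        + tip_node
    )

    (version,) = S_VERSION.unpack_from(docket, 0)
    headers = S_HEADER.unpack_from(docket, S_VERSION.size)
    uid_size, tip_rev, data_length, data_unused, tip_node_size = headers
    assert (version, tip_rev, data_length) == (0, 4242, 65536)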
|
341 | def _rawdata_filepath(revlog, docket): | |
|
342 | """The (vfs relative) nodemap's rawdata file for a given uid""" | |
|
343 | if revlog.nodemap_file.endswith(b'.n.a'): | |
|
344 | prefix = revlog.nodemap_file[:-4] | |
|
345 | else: | |
|
346 | prefix = revlog.nodemap_file[:-2] | |
|
347 | return b"%s-%s.nd" % (prefix, docket.uid) | |
|
348 | ||
|
349 | ||
|
350 | def _other_rawdata_filepath(revlog, docket): | |
|
351 | prefix = revlog.nodemap_file[:-2] | |
|
352 | pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix) | |
|
353 | new_file_path = _rawdata_filepath(revlog, docket) | |
|
354 | new_file_name = revlog.opener.basename(new_file_path) | |
|
355 | dirpath = revlog.opener.dirname(new_file_path) | |
|
356 | others = [] | |
|
357 | for f in revlog.opener.listdir(dirpath): | |
|
358 | if pattern.match(f) and f != new_file_name: | |
|
359 | others.append(f) | |
|
360 | return others | |
|
361 | ||
|
362 | ||
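A quick illustration of the naming scheme implemented by _rawdata_filepath() above (docket name and uid are hypothetical): the docket is what ties a ".n" file to the ".nd" data file that is currently valid, and _other_rawdata_filepath() sweeps up any leftover "<prefix>-<hex>.nd" siblings.

    nodemap_file = b"00changelog.n"       # the docket file
    uid = b"1af86056"                     # from _make_uid()
    prefix = nodemap_file[:-2]            # drop the trailing ".n"
    rawdata_file = b"%s-%s.nd" % (prefix, uid)
    assert rawdata_file == b"00changelog-1af86056.nd"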
|
363 | ### Nodemap Trie | |
|
364 | # | |
|
365 | # This is a simple reference implementation to compute and persist a nodemap | |
|
366 | # trie. This reference implementation is write only. The python version of this | |
|
367 | # is not expected to be actually used, since it won't provide performance | |

368 | # improvements over the existing non-persistent C implementation. | |
|
369 | # | |
|
370 | # The nodemap is persisted as a Trie using 4-bit addresses and 16-entry blocks. | |

371 | # Each revision can be addressed using its node's shortest prefix. | |
|
372 | # | |
|
373 | # The trie is stored as a sequence of blocks. Each block contains 16 entries | |

374 | # (signed 32-bit integers, big endian). Each entry can be one of the following: | |
|
375 | # | |
|
376 | # * value >= 0 -> index of sub-block | |
|
377 | # * value == -1 -> no value | |
|
378 | # * value < -1 -> a revision value: rev = -(value+2) | |
|
379 | # | |
|
380 | # The implementation focuses on simplicity, not on performance. A Rust | |

381 | # implementation should provide an efficient version of the same binary | |

382 | # persistence. This reference python implementation is never meant to be | |

383 | # extensively used in production. | |
|
384 | ||
|
385 | ||
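The block encoding described above is easy to reproduce by hand. A sketch that packs a single block holding rev 0 under nybble 0xa and a pointer to block #3 under nybble 0x4 (layout only, not tied to a real repository):

    import struct

    S_BLOCK = struct.Struct(">" + ("l" * 16))  # 16 big-endian signed 32-bit slots
    NO_ENTRY = -1

    entries = [NO_ENTRY] * 16
    entries[0xA] = -(0 + 2)  # a revision: rev = -(value + 2), so rev 0 -> -2
    entries[0x4] = 3         # value >= 0: index of another block in the file
    packed = S_BLOCK.pack(*entries)
    assert len(packed) == 64  # fixed 64-byte blocks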
|
386 | def persistent_data(index): | |
|
387 | """return the persistent binary form for a nodemap for a given index | |
|
388 | """ | |
|
389 | trie = _build_trie(index) | |
|
390 | return _persist_trie(trie) | |
|
391 | ||
|
392 | ||
|
393 | def update_persistent_data(index, root, max_idx, last_rev): | |
|
394 | """return the incremental update for persistent nodemap from a given index | |
|
395 | """ | |
|
396 | changed_block, trie = _update_trie(index, root, last_rev) | |
|
397 | return ( | |
|
398 | changed_block * S_BLOCK.size, | |
|
399 | _persist_trie(trie, existing_idx=max_idx), | |
|
400 | ) | |
|
401 | ||
|
402 | ||
|
403 | S_BLOCK = struct.Struct(">" + ("l" * 16)) | |
|
404 | ||
|
405 | NO_ENTRY = -1 | |
|
406 | # rev 0 needs to be -2 because 0 is used for block indexes and -1 is a special value. | |
|
407 | REV_OFFSET = 2 | |
|
408 | ||
|
409 | ||
|
410 | def _transform_rev(rev): | |
|
411 | """Return the number used to represent the rev in the tree. | |
|
412 | ||
|
413 | (or retrieve a rev number from such representation) | |
|
414 | ||
|
415 | Note that this is an involution, a function equal to its inverse (i.e. | |
|
416 | which gives the identity when applied to itself). | |
|
417 | """ | |
|
418 | return -(rev + REV_OFFSET) | |
|
419 | ||
|
420 | ||
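A short check of the involution property of _transform_rev() above; because the function is its own inverse, the same helper both encodes a revision for storage and decodes a stored entry:

    REV_OFFSET = 2
    transform = lambda rev: -(rev + REV_OFFSET)
    assert transform(0) == -2
    assert transform(transform(41)) == 41  # applying it twice is the identity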
|
421 | def _to_int(hex_digit): | |
|
422 | """turn an hexadecimal digit into a proper integer""" | |
|
423 | return int(hex_digit, 16) | |
|
424 | ||
|
425 | ||
|
426 | class Block(dict): | |
|
427 | """represent a block of the Trie | |
|
428 | ||
|
429 | contains up to 16 entries indexed from 0 to 15""" | |
|
430 | ||
|
431 | def __init__(self): | |
|
432 | super(Block, self).__init__() | |
|
433 | # If this block exists on disk, here is its ID | |
|
434 | self.ondisk_id = None | |
|
435 | ||
|
436 | def __iter__(self): | |
|
437 | return iter(self.get(i) for i in range(16)) | |
|
438 | ||
|
439 | ||
|
440 | def _build_trie(index): | |
|
441 | """build a nodemap trie | |
|
442 | ||
|
443 | The nodemap stores revision number for each unique prefix. | |
|
444 | ||
|
445 | Each block is a dictionary with keys in `[0, 15]`. Values are either | |
|
446 | another block or a revision number. | |
|
447 | """ | |
|
448 | root = Block() | |
|
449 | for rev in range(len(index)): | |
|
450 | hex = nodemod.hex(index[rev][7]) | |
|
451 | _insert_into_block(index, 0, root, rev, hex) | |
|
452 | return root | |
|
453 | ||
|
454 | ||
|
455 | def _update_trie(index, root, last_rev): | |
|
456 | """consume""" | |
|
457 | changed = 0 | |
|
458 | for rev in range(last_rev + 1, len(index)): | |
|
459 | hex = nodemod.hex(index[rev][7]) | |
|
460 | changed += _insert_into_block(index, 0, root, rev, hex) | |
|
461 | return changed, root | |
|
462 | ||
|
463 | ||
|
464 | def _insert_into_block(index, level, block, current_rev, current_hex): | |
|
465 | """insert a new revision in a block | |
|
466 | ||
|
467 | index: the index we are adding revision for | |
|
468 | level: the depth of the current block in the trie | |
|
469 | block: the block currently being considered | |
|
470 | current_rev: the revision number we are adding | |
|
471 | current_hex: the hexadecimal representation of the node of that revision | |
|
472 | """ | |
|
473 | changed = 1 | |
|
474 | if block.ondisk_id is not None: | |
|
475 | block.ondisk_id = None | |
|
476 | hex_digit = _to_int(current_hex[level : level + 1]) | |
|
477 | entry = block.get(hex_digit) | |
|
478 | if entry is None: | |
|
479 | # no entry, simply store the revision number | |
|
480 | block[hex_digit] = current_rev | |
|
481 | elif isinstance(entry, dict): | |
|
482 | # need to recurse to an underlying block | |
|
483 | changed += _insert_into_block( | |
|
484 | index, level + 1, entry, current_rev, current_hex | |
|
485 | ) | |
|
486 | else: | |
|
487 | # collision with a previously unique prefix, inserting new | |
|
488 | # vertices to fit both entries. | |
|
489 | other_hex = nodemod.hex(index[entry][7]) | |
|
490 | other_rev = entry | |
|
491 | new = Block() | |
|
492 | block[hex_digit] = new | |
|
493 | _insert_into_block(index, level + 1, new, other_rev, other_hex) | |
|
494 | _insert_into_block(index, level + 1, new, current_rev, current_hex) | |
|
495 | return changed | |
|
496 | ||
|
497 | ||
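The collision branch of _insert_into_block() above is the heart of the trie construction: two nodes that shared a prefix are pushed one nybble deeper until they diverge. A simplified standalone model, using plain dicts and (rev, hex) tuples instead of index lookups:

    def insert(block, rev, hexnode, level=0):
        digit = int(hexnode[level], 16)
        entry = block.get(digit)
        if entry is None:
            block[digit] = (rev, hexnode)           # unique prefix: store the rev
        elif isinstance(entry, dict):
            insert(entry, rev, hexnode, level + 1)  # recurse into the sub-block
        else:
            sub = {}                                # collision: grow a new block
            block[digit] = sub
            insert(sub, entry[0], entry[1], level + 1)
            insert(sub, rev, hexnode, level + 1)

    root = {}
    insert(root, 0, "ab12")
    insert(root, 1, "ab34")  # shares the "ab" prefix with rev 0
    assert root == {0xA: {0xB: {0x1: (0, "ab12"), 0x3: (1, "ab34")}}}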
|
498 | def _persist_trie(root, existing_idx=None): | |
|
499 | """turn a nodemap trie into persistent binary data | |
|
500 | ||
|
501 | See `_build_trie` for nodemap trie structure""" | |
|
502 | block_map = {} | |
|
503 | if existing_idx is not None: | |
|
504 | base_idx = existing_idx + 1 | |
|
505 | else: | |
|
506 | base_idx = 0 | |
|
507 | chunks = [] | |
|
508 | for tn in _walk_trie(root): | |
|
509 | if tn.ondisk_id is not None: | |
|
510 | block_map[id(tn)] = tn.ondisk_id | |
|
511 | else: | |
|
512 | block_map[id(tn)] = len(chunks) + base_idx | |
|
513 | chunks.append(_persist_block(tn, block_map)) | |
|
514 | return b''.join(chunks) | |
|
515 | ||
|
516 | ||
|
517 | def _walk_trie(block): | |
|
518 | """yield all the block in a trie | |
|
519 | ||
|
520 | Children blocks are always yield before their parent block. | |
|
521 | """ | |
|
522 | for (__, item) in sorted(block.items()): | |
|
523 | if isinstance(item, dict): | |
|
524 | for sub_block in _walk_trie(item): | |
|
525 | yield sub_block | |
|
526 | yield block | |
|
527 | ||
|
528 | ||
|
529 | def _persist_block(block_node, block_map): | |
|
530 | """produce persistent binary data for a single block | |
|
531 | ||
|
532 | Children blocks are assumed to be already persisted and present in | |
|
533 | block_map. | |
|
534 | """ | |
|
535 | data = tuple(_to_value(v, block_map) for v in block_node) | |
|
536 | return S_BLOCK.pack(*data) | |
|
537 | ||
|
538 | ||
|
539 | def _to_value(item, block_map): | |
|
540 | """persist any value as an integer""" | |
|
541 | if item is None: | |
|
542 | return NO_ENTRY | |
|
543 | elif isinstance(item, dict): | |
|
544 | return block_map[id(item)] | |
|
545 | else: | |
|
546 | return _transform_rev(item) | |
|
547 | ||
|
548 | ||
|
549 | def parse_data(data): | |
|
550 | """parse parse nodemap data into a nodemap Trie""" | |
|
551 | if (len(data) % S_BLOCK.size) != 0: | |
|
552 | msg = "nodemap data size is not a multiple of block size (%d): %d" | |
|
553 | raise error.Abort(msg % (S_BLOCK.size, len(data))) | |
|
554 | if not data: | |
|
555 | return Block(), None | |
|
556 | block_map = {} | |
|
557 | new_blocks = [] | |
|
558 | for i in range(0, len(data), S_BLOCK.size): | |
|
559 | block = Block() | |
|
560 | block.ondisk_id = len(block_map) | |
|
561 | block_map[block.ondisk_id] = block | |
|
562 | block_data = data[i : i + S_BLOCK.size] | |
|
563 | values = S_BLOCK.unpack(block_data) | |
|
564 | new_blocks.append((block, values)) | |
|
565 | for b, values in new_blocks: | |
|
566 | for idx, v in enumerate(values): | |
|
567 | if v == NO_ENTRY: | |
|
568 | continue | |
|
569 | elif v >= 0: | |
|
570 | b[idx] = block_map[v] | |
|
571 | else: | |
|
572 | b[idx] = _transform_rev(v) | |
|
573 | return block, i // S_BLOCK.size | |
|
574 | ||
|
575 | ||
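Decoding follows the same fixed-size grid: parse_data() above slices the file into 64-byte blocks and re-links them through block_map. Decoding one entry by hand, for a one-block file that stores only rev 0:

    import struct

    S_BLOCK = struct.Struct(">" + ("l" * 16))
    NO_ENTRY = -1

    data = S_BLOCK.pack(*([-2] + [NO_ENTRY] * 15))  # rev 0 encoded as -(0 + 2)
    (first,) = S_BLOCK.unpack(data)[:1]
    assert first < NO_ENTRY and -(first + 2) == 0   # below -1 -> a revision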
|
576 | # debug utility | |
|
577 | ||
|
578 | ||
|
579 | def check_data(ui, index, data): | |
|
580 | """verify that the provided nodemap data are valid for the given idex""" | |
|
581 | ret = 0 | |
|
582 | ui.status((b"revision in index: %d\n") % len(index)) | |
|
583 | root, __ = parse_data(data) | |
|
584 | all_revs = set(_all_revisions(root)) | |
|
585 | ui.status((b"revision in nodemap: %d\n") % len(all_revs)) | |
|
586 | for r in range(len(index)): | |
|
587 | if r not in all_revs: | |
|
588 | msg = b" revision missing from nodemap: %d\n" % r | |
|
589 | ui.write_err(msg) | |
|
590 | ret = 1 | |
|
591 | else: | |
|
592 | all_revs.remove(r) | |
|
593 | nm_rev = _find_node(root, nodemod.hex(index[r][7])) | |
|
594 | if nm_rev is None: | |
|
595 | msg = b" revision node does not match any entries: %d\n" % r | |
|
596 | ui.write_err(msg) | |
|
597 | ret = 1 | |
|
598 | elif nm_rev != r: | |
|
599 | msg = ( | |
|
600 | b" revision node does not match the expected revision: " | |
|
601 | b"%d != %d\n" % (r, nm_rev) | |
|
602 | ) | |
|
603 | ui.write_err(msg) | |
|
604 | ret = 1 | |
|
605 | ||
|
606 | if all_revs: | |
|
607 | for r in sorted(all_revs): | |
|
608 | msg = b" extra revision in nodemap: %d\n" % r | |
|
609 | ui.write_err(msg) | |
|
610 | ret = 1 | |
|
611 | return ret | |
|
612 | ||
|
613 | ||
|
614 | def _all_revisions(root): | |
|
615 | """return all revisions stored in a Trie""" | |
|
616 | for block in _walk_trie(root): | |
|
617 | for v in block: | |
|
618 | if v is None or isinstance(v, Block): | |
|
619 | continue | |
|
620 | yield v | |
|
621 | ||
|
622 | ||
|
623 | def _find_node(block, node): | |
|
624 | """find the revision associated with a given node""" | |
|
625 | entry = block.get(_to_int(node[0:1])) | |
|
626 | if isinstance(entry, dict): | |
|
627 | return _find_node(entry, node[1:]) | |
|
628 | return entry |
@@ -247,7 +247,15 b' def notset(repo, subset, x, order):' | |||
|
247 | 247 | |
|
248 | 248 | |
|
249 | 249 | def relationset(repo, subset, x, y, order): |
|
250 | raise error.ParseError(_(b"can't use a relation in this context")) | |
|
250 | # this is a pretty basic implementation of the 'x#y' operator, still | |

251 | # experimental, so it is undocumented. See the wiki for further ideas. | |
|
252 | # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan | |
|
253 | rel = getsymbol(y) | |
|
254 | if rel in relations: | |
|
255 | return relations[rel](repo, subset, x, rel, order) | |
|
256 | ||
|
257 | relnames = [r for r in relations.keys() if len(r) > 1] | |
|
258 | raise error.UnknownIdentifier(rel, relnames) | |
|
251 | 259 | |
|
252 | 260 | |
|
253 | 261 | def _splitrange(a, b): |
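With relationset() now dispatching through the relations table, the experimental `x#y` operator becomes usable from the command line; a hedged usage sketch (semantics per the wiki page referenced above, output elided):

    hg log -r '.#generations'   # the generations chain of the working parent
    hg log -r '.#g[-1]'         # subscripted form, routed to generationssubrel

Unknown relation names now raise UnknownIdentifier with the valid long names as suggestions, instead of the old blanket ParseError.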
@@ -281,7 +289,12 b' def _splitrange(a, b):' | |||
|
281 | 289 | return ancdepths, descdepths |
|
282 | 290 | |
|
283 | 291 | |
|
284 | def generationsrel(repo, subset, x, rel, z, order): | |

292 | def generationsrel(repo, subset, x, rel, order): | |
|
293 | z = (b'rangeall', None) | |
|
294 | return generationssubrel(repo, subset, x, rel, z, order) | |
|
295 | ||
|
296 | ||
|
297 | def generationssubrel(repo, subset, x, rel, z, order): | |
|
285 | 298 | # TODO: rewrite tests, and drop startdepth argument from ancestors() and |
|
286 | 299 | # descendants() predicates |
|
287 | 300 | a, b = getintrange( |
@@ -769,6 +782,38 b' def commonancestors(repo, subset, x):' | |||
|
769 | 782 | return subset |
|
770 | 783 | |
|
771 | 784 | |
|
785 | @predicate(b'conflictlocal()', safe=True) | |
|
786 | def conflictlocal(repo, subset, x): | |
|
787 | """The local side of the merge, if currently in an unresolved merge. | |
|
788 | ||
|
789 | "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'. | |
|
790 | """ | |
|
791 | getargs(x, 0, 0, _(b"conflictlocal takes no arguments")) | |
|
792 | from . import merge | |
|
793 | ||
|
794 | mergestate = merge.mergestate.read(repo) | |
|
795 | if mergestate.active() and repo.changelog.hasnode(mergestate.local): | |
|
796 | return subset & {repo.changelog.rev(mergestate.local)} | |
|
797 | ||
|
798 | return baseset() | |
|
799 | ||
|
800 | ||
|
801 | @predicate(b'conflictother()', safe=True) | |
|
802 | def conflictother(repo, subset, x): | |
|
803 | """The other side of the merge, if currently in an unresolved merge. | |
|
804 | ||
|
805 | "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'. | |
|
806 | """ | |
|
807 | getargs(x, 0, 0, _(b"conflictother takes no arguments")) | |
|
808 | from . import merge | |
|
809 | ||
|
810 | mergestate = merge.mergestate.read(repo) | |
|
811 | if mergestate.active() and repo.changelog.hasnode(mergestate.other): | |
|
812 | return subset & {repo.changelog.rev(mergestate.other)} | |
|
813 | ||
|
814 | return baseset() | |
|
815 | ||
|
816 | ||
|
772 | 817 | @predicate(b'contains(pattern)', weight=100) |
|
773 | 818 | def contains(repo, subset, x): |
|
774 | 819 | """The revision's manifest contains a file matching pattern (but might not |
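A usage sketch for the two predicates added above (assuming a repository sitting in an unresolved merge; outside of one, both return the empty set):

    hg log -r 'conflictlocal()' -T '{rev}: local side\n'
    hg log -r 'conflictother()' -T '{rev}: other side\n'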
@@ -1022,7 +1067,7 b' def extdata(repo, subset, x):' | |||
|
1022 | 1067 | |
|
1023 | 1068 | @predicate(b'extinct()', safe=True) |
|
1024 | 1069 | def extinct(repo, subset, x): |
|
1025 | """Obsolete changesets with obsolete descendants only. | |
|
1070 | """Obsolete changesets with obsolete descendants only. (EXPERIMENTAL) | |
|
1026 | 1071 | """ |
|
1027 | 1072 | # i18n: "extinct" is a keyword |
|
1028 | 1073 | getargs(x, 0, 0, _(b"extinct takes no arguments")) |
@@ -1670,7 +1715,7 b' def none(repo, subset, x):' | |||
|
1670 | 1715 | |
|
1671 | 1716 | @predicate(b'obsolete()', safe=True) |
|
1672 | 1717 | def obsolete(repo, subset, x): |
|
1673 | """Mutable changeset with a newer version.""" | |
|
1718 | """Mutable changeset with a newer version. (EXPERIMENTAL)""" | |
|
1674 | 1719 | # i18n: "obsolete" is a keyword |
|
1675 | 1720 | getargs(x, 0, 0, _(b"obsolete takes no arguments")) |
|
1676 | 1721 | obsoletes = obsmod.getrevs(repo, b'obsolete') |
@@ -1843,7 +1888,7 b' def parents(repo, subset, x):' | |||
|
1843 | 1888 | The set of all parents for all changesets in set, or the working directory. |
|
1844 | 1889 | """ |
|
1845 | 1890 | if x is None: |
|
1846 | ps = set(p.rev() for p in repo[x].parents()) | |

1891 | ps = {p.rev() for p in repo[x].parents()} | |
|
1847 | 1892 | else: |
|
1848 | 1893 | ps = set() |
|
1849 | 1894 | cl = repo.changelog |
@@ -2050,19 +2095,11 b' def removes(repo, subset, x):' | |||
|
2050 | 2095 | |
|
2051 | 2096 | @predicate(b'rev(number)', safe=True) |
|
2052 | 2097 | def rev(repo, subset, x): |
|
2053 | """Revision with the given numeric identifier. | |
|
2054 | """ | |
|
2055 | # i18n: "rev" is a keyword | |
|
2056 | l = getargs(x, 1, 1, _(b"rev requires one argument")) | |
|
2098 | """Revision with the given numeric identifier.""" | |
|
2057 | 2099 | try: |
|
2058 | # i18n: "rev" is a keyword | |
|
2059 | l = int(getstring(l[0], _(b"rev requires a number"))) | |
|
2060 | except (TypeError, ValueError): | |
|
2061 | # i18n: "rev" is a keyword | |
|
2062 | raise error.ParseError(_(b"rev expects a number")) | |
|
2063 | if l not in repo.changelog and l not in _virtualrevs: | |
|
2100 | return _rev(repo, subset, x) | |
|
2101 | except error.RepoLookupError: | |
|
2064 | 2102 | return baseset() |
|
2065 | return subset & baseset([l]) | |
|
2066 | 2103 | |
|
2067 | 2104 | |
|
2068 | 2105 | @predicate(b'_rev(number)', safe=True) |
@@ -2076,7 +2113,11 b' def _rev(repo, subset, x):' | |||
|
2076 | 2113 | except (TypeError, ValueError): |
|
2077 | 2114 | # i18n: "rev" is a keyword |
|
2078 | 2115 | raise error.ParseError(_(b"rev expects a number")) |
|
2116 | if l not in _virtualrevs: | |
|
2117 | try: | |
|
2079 | 2118 | repo.changelog.node(l) # check that the rev exists |
|
2119 | except IndexError: | |
|
2120 | raise error.RepoLookupError(_(b"unknown revision '%d'") % l) | |
|
2080 | 2121 | return subset & baseset([l]) |
|
2081 | 2122 | |
|
2082 | 2123 | |
@@ -2405,14 +2446,15 b' def _mapbynodefunc(repo, s, f):' | |||
|
2405 | 2446 | cl = repo.unfiltered().changelog |
|
2406 | 2447 | torev = cl.index.get_rev |
|
2407 | 2448 | tonode = cl.node |
|
2408 | result = set(torev(n) for n in f(tonode(r) for r in s)) | |

2449 | result = {torev(n) for n in f(tonode(r) for r in s)} | |
|
2409 | 2450 | result.discard(None) |
|
2410 | 2451 | return smartset.baseset(result - repo.changelog.filteredrevs) |
|
2411 | 2452 | |
|
2412 | 2453 | |
|
2413 | 2454 | @predicate(b'successors(set)', safe=True) |
|
2414 | 2455 | def successors(repo, subset, x): |
|
2415 | """All successors for set, including the given set themselves""" | |

2456 | """All successors for set, including the given set themselves. | |
|
2457 | (EXPERIMENTAL)""" | |
|
2416 | 2458 | s = getset(repo, fullreposet(repo), x) |
|
2417 | 2459 | f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes) |
|
2418 | 2460 | d = _mapbynodefunc(repo, s, f) |
@@ -2479,6 +2521,19 b' def orphan(repo, subset, x):' | |||
|
2479 | 2521 | return subset & orphan |
|
2480 | 2522 | |
|
2481 | 2523 | |
|
2524 | @predicate(b'unstable()', safe=True) | |
|
2525 | def unstable(repo, subset, x): | |
|
2526 | """Changesets with instabilities. (EXPERIMENTAL) | |
|
2527 | """ | |
|
2528 | # i18n: "unstable" is a keyword | |
|
2529 | getargs(x, 0, 0, b'unstable takes no arguments') | |
|
2530 | _unstable = set() | |
|
2531 | _unstable.update(obsmod.getrevs(repo, b'orphan')) | |
|
2532 | _unstable.update(obsmod.getrevs(repo, b'phasedivergent')) | |
|
2533 | _unstable.update(obsmod.getrevs(repo, b'contentdivergent')) | |
|
2534 | return subset & baseset(_unstable) | |
|
2535 | ||
|
2536 | ||
|
2482 | 2537 | @predicate(b'user(string)', safe=True, weight=10) |
|
2483 | 2538 | def user(repo, subset, x): |
|
2484 | 2539 | """User name contains string. The match is case-insensitive. |
@@ -2605,11 +2660,16 b' methods = {' | |||
|
2605 | 2660 | b"smartset": rawsmartset, |
|
2606 | 2661 | } |
|
2607 | 2662 | |
|
2608 | subscriptrelations = { | |

2663 | relations = { | |
|
2609 | 2664 | b"g": generationsrel, |
|
2610 | 2665 | b"generations": generationsrel, |
|
2611 | 2666 | } |
|
2612 | 2667 | |
|
2668 | subscriptrelations = { | |
|
2669 | b"g": generationssubrel, | |
|
2670 | b"generations": generationssubrel, | |
|
2671 | } | |
|
2672 | ||
|
2613 | 2673 | |
|
2614 | 2674 | def lookupfn(repo): |
|
2615 | 2675 | return lambda symbol: scmutil.isrevsymbol(repo, symbol) |
@@ -1457,10 +1457,10 b' def movedirstate(repo, newctx, match=Non' | |||
|
1457 | 1457 | # Merge old parent and old working dir copies |
|
1458 | 1458 | oldcopies = copiesmod.pathcopies(newctx, oldctx, match) |
|
1459 | 1459 | oldcopies.update(copies) |
|
1460 | copies = dict( | |

1461 | (dst, oldcopies.get(src, src)) | |

1460 | copies = { | |
|
1461 | dst: oldcopies.get(src, src) | |
|
1462 | 1462 | for dst, src in pycompat.iteritems(oldcopies) |
|
1463 | ) | |

1463 | } | |
|
1464 | 1464 | # Adjust the dirstate copies |
|
1465 | 1465 | for dst, src in pycompat.iteritems(copies): |
|
1466 | 1466 | if src not in newctx or dst in newctx or ds[dst] != b'a': |
@@ -1900,8 +1900,11 b' fileprefetchhooks = util.hooks()' | |||
|
1900 | 1900 | _reportstroubledchangesets = True |
|
1901 | 1901 | |
|
1902 | 1902 | |
|
1903 | def registersummarycallback(repo, otr, txnname=b''): | |
|
1903 | def registersummarycallback(repo, otr, txnname=b'', as_validator=False): | |
|
1904 | 1904 | """register a callback to issue a summary after the transaction is closed |
|
1905 | ||
|
1906 | If as_validator is true, then the callbacks are registered as transaction | |
|
1907 | validators instead | |
|
1905 | 1908 | """ |
|
1906 | 1909 | |
|
1907 | 1910 | def txmatch(sources): |
@@ -1927,6 +1930,9 b' def registersummarycallback(repo, otr, t' | |||
|
1927 | 1930 | func(repo, tr) |
|
1928 | 1931 | |
|
1929 | 1932 | newcat = b'%02i-txnreport' % len(categories) |
|
1933 | if as_validator: | |
|
1934 | otr.addvalidator(newcat, wrapped) | |
|
1935 | else: | |
|
1930 | 1936 | otr.addpostclose(newcat, wrapped) |
|
1931 | 1937 | categories.append(newcat) |
|
1932 | 1938 | return wrapped |
@@ -1942,6 +1948,8 b' def registersummarycallback(repo, otr, t' | |||
|
1942 | 1948 | if cgheads: |
|
1943 | 1949 | htext = _(b" (%+d heads)") % cgheads |
|
1944 | 1950 | msg = _(b"added %d changesets with %d changes to %d files%s\n") |
|
1951 | if as_validator: | |
|
1952 | msg = _(b"adding %d changesets with %d changes to %d files%s\n") | |
|
1945 | 1953 | assert repo is not None # help pytype |
|
1946 | 1954 | repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext)) |
|
1947 | 1955 | |
@@ -1954,7 +1962,10 b' def registersummarycallback(repo, otr, t' | |||
|
1954 | 1962 | if newmarkers: |
|
1955 | 1963 | repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers) |
|
1956 | 1964 | if obsoleted: |
|
1957 | repo.ui.status(_(b'obsoleted %i changesets\n') % len(obsoleted)) | |

1965 | msg = _(b'obsoleted %i changesets\n') | |
|
1966 | if as_validator: | |
|
1967 | msg = _(b'obsoleting %i changesets\n') | |
|
1968 | repo.ui.status(msg % len(obsoleted)) | |
|
1958 | 1969 | |
|
1959 | 1970 | if obsolete.isenabled( |
|
1960 | 1971 | repo, obsolete.createmarkersopt |
@@ -2047,19 +2058,17 b' def registersummarycallback(repo, otr, t' | |||
|
2047 | 2058 | pull/unbundle. |
|
2048 | 2059 | """ |
|
2049 | 2060 | origrepolen = tr.changes.get(b'origrepolen', len(repo)) |
|
2050 | phasetracking = tr.changes.get(b'phases', {}) | |
|
2051 | if not phasetracking: | |
|
2052 | return | |
|
2053 | published = [ | |
|
2054 | rev | |
|
2055 | for rev, (old, new) in pycompat.iteritems(phasetracking) | |
|
2056 | if new == phases.public and rev < origrepolen | |
|
2057 | ] | |
|
2061 | published = [] | |
|
2062 | for revs, (old, new) in tr.changes.get(b'phases', []): | |
|
2063 | if new != phases.public: | |
|
2064 | continue | |
|
2065 | published.extend(rev for rev in revs if rev < origrepolen) | |
|
2058 | 2066 | if not published: |
|
2059 | 2067 | return |
|
2060 | repo.ui.status( | |
|
2061 | _(b'%d local changesets published\n') % len(published) | |
|
2062 | ) | |
|
2068 | msg = _(b'%d local changesets published\n') | |
|
2069 | if as_validator: | |
|
2070 | msg = _(b'%d local changesets will be published\n') | |
|
2071 | repo.ui.status(msg % len(published)) | |
|
2063 | 2072 | |
|
2064 | 2073 | |
|
2065 | 2074 | def getinstabilitymessage(delta, instability): |
@@ -745,7 +745,7 b' def unshelveabort(ui, repo, state):' | |||
|
745 | 745 | try: |
|
746 | 746 | checkparents(repo, state) |
|
747 | 747 | |
|
748 | merge.update(repo, state.pendingctx, branchmerge=False, force=True) | |

748 | merge.clean_update(state.pendingctx) | |
|
749 | 749 | if state.activebookmark and state.activebookmark in repo._bookmarks: |
|
750 | 750 | bookmarks.activate(repo, state.activebookmark) |
|
751 | 751 | mergefiles(ui, repo, state.wctx, state.pendingctx) |
@@ -827,10 +827,6 b' def unshelvecontinue(ui, repo, state, op' | |||
|
827 | 827 | ) |
|
828 | 828 | |
|
829 | 829 | if newnode is None: |
|
830 | # If it ended up being a no-op commit, then the normal | |
|
831 | # merge state clean-up path doesn't happen, so do it | |
|
832 | # here. Fix issue5494 | |
|
833 | merge.mergestate.clean(repo) | |
|
834 | 830 | shelvectx = state.pendingctx |
|
835 | 831 | msg = _( |
|
836 | 832 | b'note: unshelved changes already existed ' |
@@ -996,7 +992,6 b' def _rebaserestoredcommit(' | |||
|
996 | 992 | stats = merge.graft( |
|
997 | 993 | repo, |
|
998 | 994 | shelvectx, |
|
999 | shelvectx.p1(), | |
|
1000 | 995 | labels=[b'working-copy', b'shelve'], |
|
1001 | 996 | keepconflictparent=True, |
|
1002 | 997 | ) |
@@ -1032,10 +1027,6 b' def _rebaserestoredcommit(' | |||
|
1032 | 1027 | ) |
|
1033 | 1028 | |
|
1034 | 1029 | if newnode is None: |
|
1035 | # If it ended up being a no-op commit, then the normal | |
|
1036 | # merge state clean-up path doesn't happen, so do it | |
|
1037 | # here. Fix issue5494 | |
|
1038 | merge.mergestate.clean(repo) | |
|
1039 | 1030 | shelvectx = tmpwctx |
|
1040 | 1031 | msg = _( |
|
1041 | 1032 | b'note: unshelved changes already existed ' |
@@ -1083,7 +1074,7 b' def _checkunshelveuntrackedproblems(ui, ' | |||
|
1083 | 1074 | raise error.Abort(m, hint=hint) |
|
1084 | 1075 | |
|
1085 | 1076 | |
|
1086 | def dounshelve(ui, repo, *shelved, **opts): | |

1077 | def unshelvecmd(ui, repo, *shelved, **opts): | |
|
1087 | 1078 | opts = pycompat.byteskwargs(opts) |
|
1088 | 1079 | abortf = opts.get(b'abort') |
|
1089 | 1080 | continuef = opts.get(b'continue') |
@@ -1130,6 +1121,10 b' def dounshelve(ui, repo, *shelved, **opt' | |||
|
1130 | 1121 | if not shelvedfile(repo, basename, patchextension).exists(): |
|
1131 | 1122 | raise error.Abort(_(b"shelved change '%s' not found") % basename) |
|
1132 | 1123 | |
|
1124 | return _dounshelve(ui, repo, basename, opts) | |
|
1125 | ||
|
1126 | ||
|
1127 | def _dounshelve(ui, repo, basename, opts): | |
|
1133 | 1128 | repo = repo.unfiltered() |
|
1134 | 1129 | lock = tr = None |
|
1135 | 1130 | try: |
@@ -137,7 +137,7 b' def _buildencodefun():' | |||
|
137 | 137 | asciistr = list(map(xchr, range(127))) |
|
138 | 138 | capitals = list(range(ord(b"A"), ord(b"Z") + 1)) |
|
139 | 139 | |
|
140 | cmap = dict((x, x) for x in asciistr) | |

140 | cmap = {x: x for x in asciistr} | |
|
141 | 141 | for x in _reserved(): |
|
142 | 142 | cmap[xchr(x)] = b"~%02x" % x |
|
143 | 143 | for x in capitals + [ord(e)]: |
@@ -200,7 +200,7 b' def _buildlowerencodefun():' | |||
|
200 | 200 | 'the~07quick~adshot' |
|
201 | 201 | ''' |
|
202 | 202 | xchr = pycompat.bytechr |
|
203 | cmap = dict((xchr(x), xchr(x)) for x in pycompat.xrange(127)) | |

203 | cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)} | |
|
204 | 204 | for x in _reserved(): |
|
205 | 205 | cmap[xchr(x)] = b"~%02x" % x |
|
206 | 206 | for x in range(ord(b"A"), ord(b"Z") + 1): |
@@ -806,7 +806,7 b' class hgsubrepo(abstractsubrepo):' | |||
|
806 | 806 | self.ui.debug( |
|
807 | 807 | b'merging subrepository "%s"\n' % subrelpath(self) |
|
808 | 808 | ) |
|
809 | hg.merge(self._repo, state[1], remind=False) | |

809 | hg.merge(dst, remind=False) | |
|
810 | 810 | |
|
811 | 811 | wctx = self._repo[None] |
|
812 | 812 | if self.dirty(): |
@@ -720,15 +720,20 b' class hgtagsfnodescache(object):' | |||
|
720 | 720 | |
|
721 | 721 | self._dirtyoffset = None |
|
722 | 722 | |
|
723 | rawlentokeep = min( | |
|
724 | wantedlen, (rawlen // _fnodesrecsize) * _fnodesrecsize | |
|
725 | ) | |
|
726 | if rawlen > rawlentokeep: | |
|
727 | # There's no easy way to truncate array instances. This seems | |
|
728 | # slightly less evil than copying a potentially large array slice. | |
|
729 | for i in range(rawlen - rawlentokeep): | |
|
730 | self._raw.pop() | |
|
731 | rawlen = len(self._raw) | |
|
732 | self._dirtyoffset = rawlen | |
|
723 | 733 | if rawlen < wantedlen: |
|
734 | if self._dirtyoffset is None: | |
|
724 | 735 | self._dirtyoffset = rawlen |
|
725 | 736 | self._raw.extend(b'\xff' * (wantedlen - rawlen)) |
|
726 | elif rawlen > wantedlen: | |
|
727 | # There's no easy way to truncate array instances. This seems | |
|
728 | # slightly less evil than copying a potentially large array slice. | |
|
729 | for i in range(rawlen - wantedlen): | |
|
730 | self._raw.pop() | |
|
731 | self._dirtyoffset = len(self._raw) | |
|
732 | 737 | |
|
733 | 738 | def getfnode(self, node, computemissing=True): |
|
734 | 739 | """Obtain the filenode of the .hgtags file at a specified revision. |
@@ -18,6 +18,7 b' from . import (' | |||
|
18 | 18 | node, |
|
19 | 19 | pycompat, |
|
20 | 20 | registrar, |
|
21 | smartset, | |
|
21 | 22 | templateutil, |
|
22 | 23 | url, |
|
23 | 24 | util, |
@@ -105,9 +106,17 b' def basename(path):' | |||
|
105 | 106 | return os.path.basename(path) |
|
106 | 107 | |
|
107 | 108 | |
|
109 | def _tocborencodable(obj): | |
|
110 | if isinstance(obj, smartset.abstractsmartset): | |
|
111 | return list(obj) | |
|
112 | return obj | |
|
113 | ||
|
114 | ||
|
108 | 115 | @templatefilter(b'cbor') |
|
109 | 116 | def cbor(obj): |
|
110 | 117 | """Any object. Serializes the object to CBOR bytes.""" |
|
118 | # cborutil is stricter about type than json() filter | |
|
119 | obj = pycompat.rapply(_tocborencodable, obj) | |
|
111 | 120 | return b''.join(cborutil.streamencode(obj)) |
|
112 | 121 | |
|
113 | 122 |
@@ -16,6 +16,7 b' from .node import (' | |||
|
16 | 16 | ) |
|
17 | 17 | from . import ( |
|
18 | 18 | color, |
|
19 | dagop, | |
|
19 | 20 | diffutil, |
|
20 | 21 | encoding, |
|
21 | 22 | error, |
@@ -658,17 +659,19 b' def revset(context, mapping, args):' | |||
|
658 | 659 | return m(repo) |
|
659 | 660 | |
|
660 | 661 | if len(args) > 1: |
|
662 | key = None # dynamically-created revs shouldn't be cached | |
|
661 | 663 | formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]] |
|
662 | 664 | revs = query(revsetlang.formatspec(raw, *formatargs)) |
|
663 | 665 | else: |
|
664 | 666 | cache = context.resource(mapping, b'cache') |
|
665 | 667 | revsetcache = cache.setdefault(b"revsetcache", {}) |
|
666 | if raw in revsetcache: | |
|
667 |
|
|
|
668 | key = raw | |
|
669 | if key in revsetcache: | |
|
670 | revs = revsetcache[key] | |
|
668 | 671 | else: |
|
669 | 672 | revs = query(raw) |
|
670 |
revsetcache[ |
|
|
671 |
return template |
|
|
673 | revsetcache[key] = revs | |
|
674 | return templateutil.revslist(repo, revs, name=b'revision', cachekey=key) | |
|
672 | 675 | |
|
673 | 676 | |
|
674 | 677 | @templatefunc(b'rstdoc(text, style)') |
@@ -840,6 +843,45 b' def startswith(context, mapping, args):' | |||
|
840 | 843 | return b'' |
|
841 | 844 | |
|
842 | 845 | |
|
846 | @templatefunc( | |
|
847 | b'subsetparents(rev, revset)', | |
|
848 | argspec=b'rev revset', | |
|
849 | requires={b'repo', b'cache'}, | |
|
850 | ) | |
|
851 | def subsetparents(context, mapping, args): | |
|
852 | """Look up parents of the rev in the sub graph given by the revset.""" | |
|
853 | if b'rev' not in args or b'revset' not in args: | |
|
854 | # i18n: "subsetparents" is a keyword | |
|
855 | raise error.ParseError(_(b"subsetparents expects two arguments")) | |
|
856 | ||
|
857 | repo = context.resource(mapping, b'repo') | |
|
858 | ||
|
859 | rev = templateutil.evalinteger(context, mapping, args[b'rev']) | |
|
860 | ||
|
861 | # TODO: maybe subsetparents(rev) should be allowed. the default revset | |
|
862 | # will be the revisions specified by -rREV argument. | |
|
863 | q = templateutil.evalwrapped(context, mapping, args[b'revset']) | |
|
864 | if not isinstance(q, templateutil.revslist): | |
|
865 | # i18n: "subsetparents" is a keyword | |
|
866 | raise error.ParseError(_(b"subsetparents expects a queried revset")) | |
|
867 | subset = q.tovalue(context, mapping) | |
|
868 | key = q.cachekey | |
|
869 | ||
|
870 | if key: | |
|
871 | # cache only if revset query isn't dynamic | |
|
872 | cache = context.resource(mapping, b'cache') | |
|
873 | walkercache = cache.setdefault(b'subsetparentswalker', {}) | |
|
874 | if key in walkercache: | |
|
875 | walker = walkercache[key] | |
|
876 | else: | |
|
877 | walker = dagop.subsetparentswalker(repo, subset) | |
|
878 | walkercache[key] = walker | |
|
879 | else: | |
|
880 | # for one-shot use, specify startrev to limit the search space | |
|
881 | walker = dagop.subsetparentswalker(repo, subset, startrev=rev) | |
|
882 | return templateutil.revslist(repo, walker.parentsset(rev)) | |
|
883 | ||
|
884 | ||
|
843 | 885 | @templatefunc(b'word(number, text[, separator])') |
|
844 | 886 | def word(context, mapping, args): |
|
845 | 887 | """Return the nth word from a string.""" |
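A hedged usage sketch for subsetparents() above: for each listed revision it reports the parents within the queried subset rather than in the full graph (the revset here is arbitrary; any cacheable query works):

    hg log -r 'branch(default)' \
        -T '{rev} -> {subsetparents(rev, revset("branch(default)"))}\n'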
@@ -396,26 +396,40 b' def showfiles(context, mapping):' | |||
|
396 | 396 | return templateutil.compatfileslist(context, mapping, b'file', ctx.files()) |
|
397 | 397 | |
|
398 | 398 | |
|
399 | @templatekeyword(b'graphnode', requires={b'repo', b'ctx'}) | |
|
399 | @templatekeyword(b'graphnode', requires={b'repo', b'ctx', b'cache'}) | |
|
400 | 400 | def showgraphnode(context, mapping): |
|
401 | 401 | """String. The character representing the changeset node in an ASCII |
|
402 | 402 | revision graph.""" |
|
403 | 403 | repo = context.resource(mapping, b'repo') |
|
404 | 404 | ctx = context.resource(mapping, b'ctx') |
|
405 | return getgraphnode(repo, ctx) | |
|
405 | cache = context.resource(mapping, b'cache') | |
|
406 | return getgraphnode(repo, ctx, cache) | |
|
406 | 407 | |
|
407 | 408 | |
|
408 | def getgraphnode(repo, ctx): | |
|
409 | return getgraphnodecurrent(repo, ctx) or getgraphnodesymbol(ctx) | |
|
409 | def getgraphnode(repo, ctx, cache): | |
|
410 | return getgraphnodecurrent(repo, ctx, cache) or getgraphnodesymbol(ctx) | |
|
410 | 411 | |
|
411 | 412 | |
|
412 | def getgraphnodecurrent(repo, ctx): | |
|
413 | def getgraphnodecurrent(repo, ctx, cache): | |
|
413 | 414 | wpnodes = repo.dirstate.parents() |
|
414 | 415 | if wpnodes[1] == nullid: |
|
415 | 416 | wpnodes = wpnodes[:1] |
|
416 | 417 | if ctx.node() in wpnodes: |
|
417 | 418 | return b'@' |
|
418 | 419 | else: |
|
420 | merge_nodes = cache.get(b'merge_nodes') | |
|
421 | if merge_nodes is None: | |
|
422 | from . import merge | |
|
423 | ||
|
424 | mergestate = merge.mergestate.read(repo) | |
|
425 | if mergestate.active(): | |
|
426 | merge_nodes = (mergestate.local, mergestate.other) | |
|
427 | else: | |
|
428 | merge_nodes = () | |
|
429 | cache[b'merge_nodes'] = merge_nodes | |
|
430 | ||
|
431 | if ctx.node() in merge_nodes: | |
|
432 | return b'%' | |
|
419 | 433 | return b'' |
|
420 | 434 | |
|
421 | 435 | |
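The cached merge_nodes lookup above gives `hg log --graph` a new marker: while a merge is unresolved, the "other" merge parent is drawn as "%" (the working-copy parent keeps "@"). A sketch, not real output:

    $ hg log -G -T '{rev}\n'
    @  3    <- mergestate.local, also the working-copy parent
    |
    | %  2  <- mergestate.other
    |/
    o  1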
@@ -548,7 +562,11 b' def shownames(context, mapping, namespac' | |||
|
548 | 562 | """helper method to generate a template keyword for a namespace""" |
|
549 | 563 | repo = context.resource(mapping, b'repo') |
|
550 | 564 | ctx = context.resource(mapping, b'ctx') |
|
551 | ns = repo.names[namespace] | |

565 | ns = repo.names.get(namespace) | |
|
566 | if ns is None: | |
|
567 | # namespaces.addnamespace() registers new template keyword, but | |
|
568 | # the registered namespace might not exist in the current repo. | |
|
569 | return | |
|
552 | 570 | names = ns.names(repo, ctx.node()) |
|
553 | 571 | return compatlist( |
|
554 | 572 | context, mapping, ns.templatename, names, plural=namespace |
@@ -861,24 +879,6 b' def showrev(context, mapping):' | |||
|
861 | 879 | return scmutil.intrev(ctx) |
|
862 | 880 | |
|
863 | 881 | |
|
864 | def showrevslist(context, mapping, name, revs): | |
|
865 | """helper to generate a list of revisions in which a mapped template will | |
|
866 | be evaluated""" | |
|
867 | repo = context.resource(mapping, b'repo') | |
|
868 | # revs may be a smartset; don't compute it until f() has to be evaluated | |
|
869 | def f(): | |
|
870 | srevs = [b'%d' % r for r in revs] | |
|
871 | return _showcompatlist(context, mapping, name, srevs) | |
|
872 | ||
|
873 | return _hybrid( | |
|
874 | f, | |
|
875 | revs, | |
|
876 | lambda x: {name: x, b'ctx': repo[x]}, | |
|
877 | pycompat.identity, | |
|
878 | keytype=int, | |
|
879 | ) | |
|
880 | ||
|
881 | ||
|
882 | 882 | @templatekeyword(b'subrepos', requires={b'ctx'}) |
|
883 | 883 | def showsubrepos(context, mapping): |
|
884 | 884 | """List of strings. Updated subrepositories in the changeset.""" |
@@ -45,6 +45,9 b' hybrid' | |||
|
45 | 45 | hybriditem |
|
46 | 46 | represents a scalar printable value, also supports % operator. |
|
47 | 47 | |
|
48 | revslist | |
|
49 | represents a list of revision numbers. | |
|
50 | ||
|
48 | 51 | mappinggenerator, mappinglist |
|
49 | 52 | represents mappings (i.e. a list of dicts), which may have default |
|
50 | 53 | output format. |
|
1 | NO CONTENT: modified file | |

The requested commit or file is too big and content was truncated.
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: file copied from tests/test-rename.t to tests/test-rename-rev.t | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: file was removed | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: file was removed | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: file was removed | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: file was removed | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: file was removed | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: file was removed | |
The requested commit or file is too big and content was truncated. Show full diff |