@@ -1,343 +1,343 @@
|
1 | 1 | # narrowbundle2.py - bundle2 extensions for narrow repository support |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2017 Google, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import struct |
|
12 | 12 | |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | 14 | from mercurial.node import nullid |
|
15 | 15 | from mercurial import ( |
|
16 | 16 | bundle2, |
|
17 | 17 | changegroup, |
|
18 | 18 | error, |
|
19 | 19 | exchange, |
|
20 | 20 | localrepo, |
|
21 | 21 | narrowspec, |
|
22 | 22 | repair, |
|
23 | 23 | requirements, |
|
24 | 24 | scmutil, |
|
25 | 25 | util, |
|
26 | 26 | wireprototypes, |
|
27 | 27 | ) |
|
28 | 28 | from mercurial.utils import stringutil |
|
29 | 29 | |
|
30 | 30 | _NARROWACL_SECTION = b'narrowacl' |
|
31 | 31 | _CHANGESPECPART = b'narrow:changespec' |
|
32 | 32 | _RESSPECS = b'narrow:responsespec' |
|
33 | 33 | _SPECPART = b'narrow:spec' |
|
34 | 34 | _SPECPART_INCLUDE = b'include' |
|
35 | 35 | _SPECPART_EXCLUDE = b'exclude' |
|
36 | 36 | _KILLNODESIGNAL = b'KILL' |
|
37 | 37 | _DONESIGNAL = b'DONE' |
|
38 | 38 | _ELIDEDCSHEADER = b'>20s20s20sl' # cset id, p1, p2, len(text) |
|
39 | 39 | _ELIDEDMFHEADER = b'>20s20s20s20sl' # manifest id, p1, p2, link id, len(text) |
|
40 | 40 | _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER) |
|
41 | 41 | _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER) |
|
42 | 42 | |
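To make the two elided header formats above concrete, here is a minimal sketch packing and unpacking an elided changeset header (the all-zero node ids are placeholders, not real hashes):

```python
import struct

_ELIDEDCSHEADER = b'>20s20s20sl'  # cset id, p1, p2, len(text)
_CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)  # 3 * 20 + 4 = 64 bytes

# Pack a header for a hypothetical changeset whose text is 123 bytes long.
header = struct.pack(_ELIDEDCSHEADER, b'\0' * 20, b'\0' * 20, b'\0' * 20, 123)
assert len(header) == _CSHEADERSIZE

# The receiving side unpacks the same fixed-size prefix.
node, p1, p2, textlen = struct.unpack(_ELIDEDCSHEADER, header)
assert textlen == 123
```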
|
43 | 43 | # Serve a changegroup for a client with a narrow clone. |
|
44 | 44 | def getbundlechangegrouppart_narrow( |
|
45 | 45 | bundler, |
|
46 | 46 | repo, |
|
47 | 47 | source, |
|
48 | 48 | bundlecaps=None, |
|
49 | 49 | b2caps=None, |
|
50 | 50 | heads=None, |
|
51 | 51 | common=None, |
|
52 | 52 | **kwargs |
|
53 | 53 | ): |
|
54 | 54 | assert repo.ui.configbool(b'experimental', b'narrowservebrokenellipses') |
|
55 | 55 | |
|
56 | 56 | cgversions = b2caps.get(b'changegroup') |
|
57 | 57 | cgversions = [ |
|
58 | 58 | v |
|
59 | 59 | for v in cgversions |
|
60 | 60 | if v in changegroup.supportedoutgoingversions(repo) |
|
61 | 61 | ] |
|
62 | 62 | if not cgversions: |
|
63 | 63 | raise ValueError(_(b'no common changegroup version')) |
|
64 | 64 | version = max(cgversions) |
|
65 | 65 | |
|
66 | 66 | include = sorted(filter(bool, kwargs.get('includepats', []))) |
|
67 | 67 | exclude = sorted(filter(bool, kwargs.get('excludepats', []))) |
|
68 | 68 | generateellipsesbundle2( |
|
69 | 69 | bundler, |
|
70 | 70 | repo, |
|
71 | 71 | include, |
|
72 | 72 | exclude, |
|
73 | 73 | version, |
|
74 | 74 | common, |
|
75 | 75 | heads, |
|
76 | 76 | kwargs.get('depth', None), |
|
77 | 77 | ) |
|
78 | 78 | |
|
79 | 79 | |
|
80 | 80 | def generateellipsesbundle2( |
|
81 | 81 | bundler, repo, include, exclude, version, common, heads, depth, |
|
82 | 82 | ): |
|
83 | 83 | match = narrowspec.match(repo.root, include=include, exclude=exclude) |
|
84 | 84 | if depth is not None: |
|
85 | 85 | depth = int(depth) |
|
86 | 86 | if depth < 1: |
|
87 | 87 | raise error.Abort(_(b'depth must be positive, got %d') % depth) |
|
88 | 88 | |
|
89 | 89 | heads = set(heads or repo.heads()) |
|
90 | 90 | common = set(common or [nullid]) |
|
91 | 91 | |
|
92 | 92 | visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis( |
|
93 | 93 | repo, common, heads, set(), match, depth=depth |
|
94 | 94 | ) |
|
95 | 95 | |
|
96 | 96 | repo.ui.debug(b'Found %d relevant revs\n' % len(relevant_nodes)) |
|
97 | 97 | if visitnodes: |
|
98 | 98 | packer = changegroup.getbundler( |
|
99 | 99 | version, |
|
100 | 100 | repo, |
|
101 | 101 | matcher=match, |
|
102 | 102 | ellipses=True, |
|
103 | 103 | shallow=depth is not None, |
|
104 | 104 | ellipsisroots=ellipsisroots, |
|
105 | 105 | fullnodes=relevant_nodes, |
|
106 | 106 | ) |
|
107 | 107 | cgdata = packer.generate(common, visitnodes, False, b'narrow_widen') |
|
108 | 108 | |
|
109 | 109 | part = bundler.newpart(b'changegroup', data=cgdata) |
|
110 | 110 | part.addparam(b'version', version) |
|
111 | | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: |
|
| 111 | if scmutil.istreemanifest(repo): |
|
112 | 112 | part.addparam(b'treemanifest', b'1') |
|
113 | 113 | |
|
114 | 114 | |
|
115 | 115 | def generate_ellipses_bundle2_for_widening( |
|
116 | 116 | bundler, repo, oldmatch, newmatch, version, common, known, |
|
117 | 117 | ): |
|
118 | 118 | common = set(common or [nullid]) |
|
119 | 119 | # Steps: |
|
120 | 120 | # 1. Send kill for "$known & ::common" |
|
121 | 121 | # |
|
122 | 122 | # 2. Send changegroup for ::common |
|
123 | 123 | # |
|
124 | 124 | # 3. Proceed. |
|
125 | 125 | # |
|
126 | 126 | # In the future, we can send kills for only the specific |
|
127 | 127 | # nodes we know should go away or change shape, and then |
|
128 | 128 | # send a data stream that tells the client something like this: |
|
129 | 129 | # |
|
130 | 130 | # a) apply this changegroup |
|
131 | 131 | # b) apply nodes XXX, YYY, ZZZ that you already have |
|
132 | 132 | # c) goto a |
|
133 | 133 | # |
|
134 | 134 | # until they've built up the full new state. |
|
135 | 135 | knownrevs = {repo.changelog.rev(n) for n in known} |
|
136 | 136 | # TODO: we could send only roots() of this set, and the |
|
137 | 137 | # list of nodes in common, and the client could work out |
|
138 | 138 | # what to strip, instead of us explicitly sending every |
|
139 | 139 | # single node. |
|
140 | 140 | deadrevs = knownrevs |
|
141 | 141 | |
|
142 | 142 | def genkills(): |
|
143 | 143 | for r in deadrevs: |
|
144 | 144 | yield _KILLNODESIGNAL |
|
145 | 145 | yield repo.changelog.node(r) |
|
146 | 146 | yield _DONESIGNAL |
|
147 | 147 | |
|
148 | 148 | bundler.newpart(_CHANGESPECPART, data=genkills()) |
|
149 | 149 | newvisit, newfull, newellipsis = exchange._computeellipsis( |
|
150 | 150 | repo, set(), common, knownrevs, newmatch |
|
151 | 151 | ) |
|
152 | 152 | if newvisit: |
|
153 | 153 | packer = changegroup.getbundler( |
|
154 | 154 | version, |
|
155 | 155 | repo, |
|
156 | 156 | matcher=newmatch, |
|
157 | 157 | ellipses=True, |
|
158 | 158 | shallow=False, |
|
159 | 159 | ellipsisroots=newellipsis, |
|
160 | 160 | fullnodes=newfull, |
|
161 | 161 | ) |
|
162 | 162 | cgdata = packer.generate(common, newvisit, False, b'narrow_widen') |
|
163 | 163 | |
|
164 | 164 | part = bundler.newpart(b'changegroup', data=cgdata) |
|
165 | 165 | part.addparam(b'version', version) |
|
166 | | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: |
|
| 166 | if scmutil.istreemanifest(repo): |
|
167 | 167 | part.addparam(b'treemanifest', b'1') |
|
168 | 168 | |
|
169 | 169 | |
|
170 | 170 | @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE)) |
|
171 | 171 | def _handlechangespec_2(op, inpart): |
|
172 | 172 | # XXX: This bundle2 handling is buggy and should be removed after hg5.2 is |
|
173 | 173 | # released. New servers will send a mandatory bundle2 part named |
|
174 | 174 | # 'Narrowspec' and will send specs as data instead of params. |
|
175 | 175 | # Refer to issue5952 and 6019 |
|
176 | 176 | includepats = set(inpart.params.get(_SPECPART_INCLUDE, b'').splitlines()) |
|
177 | 177 | excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, b'').splitlines()) |
|
178 | 178 | narrowspec.validatepatterns(includepats) |
|
179 | 179 | narrowspec.validatepatterns(excludepats) |
|
180 | 180 | |
|
181 | 181 | if requirements.NARROW_REQUIREMENT not in op.repo.requirements: |
|
182 | 182 | op.repo.requirements.add(requirements.NARROW_REQUIREMENT) |
|
183 | 183 | scmutil.writereporequirements(op.repo) |
|
184 | 184 | op.repo.setnarrowpats(includepats, excludepats) |
|
185 | 185 | narrowspec.copytoworkingcopy(op.repo) |
|
186 | 186 | |
|
187 | 187 | |
|
188 | 188 | @bundle2.parthandler(_RESSPECS) |
|
189 | 189 | def _handlenarrowspecs(op, inpart): |
|
190 | 190 | data = inpart.read() |
|
191 | 191 | inc, exc = data.split(b'\0') |
|
192 | 192 | includepats = set(inc.splitlines()) |
|
193 | 193 | excludepats = set(exc.splitlines()) |
|
194 | 194 | narrowspec.validatepatterns(includepats) |
|
195 | 195 | narrowspec.validatepatterns(excludepats) |
|
196 | 196 | |
|
197 | 197 | if requirements.NARROW_REQUIREMENT not in op.repo.requirements: |
|
198 | 198 | op.repo.requirements.add(requirements.NARROW_REQUIREMENT) |
|
199 | 199 | scmutil.writereporequirements(op.repo) |
|
200 | 200 | op.repo.setnarrowpats(includepats, excludepats) |
|
201 | 201 | narrowspec.copytoworkingcopy(op.repo) |
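For reference, the `narrow:responsespec` payload consumed above is just two newline-separated pattern lists joined by a NUL byte; building one by hand (patterns chosen for illustration):

```python
includepats = [b'path:src', b'path:docs']
excludepats = [b'path:src/vendor']
# Matches the data.split(b'\0') / splitlines() parsing in _handlenarrowspecs.
data = b'\n'.join(includepats) + b'\0' + b'\n'.join(excludepats)
```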
|
202 | 202 | |
|
203 | 203 | |
|
204 | 204 | @bundle2.parthandler(_CHANGESPECPART) |
|
205 | 205 | def _handlechangespec(op, inpart): |
|
206 | 206 | repo = op.repo |
|
207 | 207 | cl = repo.changelog |
|
208 | 208 | |
|
209 | 209 | # changesets which need to be stripped entirely. either they're no longer |
|
210 | 210 | # needed in the new narrow spec, or the server is sending a replacement |
|
211 | 211 | # in the changegroup part. |
|
212 | 212 | clkills = set() |
|
213 | 213 | |
|
214 | 214 | # A changespec part contains all the updates to ellipsis nodes |
|
215 | 215 | # that will happen as a result of widening or narrowing a |
|
216 | 216 | # repo. All the changes that this block encounters are ellipsis |
|
217 | 217 | # nodes or flags to kill an existing ellipsis. |
|
218 | 218 | chunksignal = changegroup.readexactly(inpart, 4) |
|
219 | 219 | while chunksignal != _DONESIGNAL: |
|
220 | 220 | if chunksignal == _KILLNODESIGNAL: |
|
221 | 221 | # a node used to be an ellipsis but isn't anymore |
|
222 | 222 | ck = changegroup.readexactly(inpart, 20) |
|
223 | 223 | if cl.hasnode(ck): |
|
224 | 224 | clkills.add(ck) |
|
225 | 225 | else: |
|
226 | 226 | raise error.Abort( |
|
227 | 227 | _(b'unexpected changespec node chunk type: %s') % chunksignal |
|
228 | 228 | ) |
|
229 | 229 | chunksignal = changegroup.readexactly(inpart, 4) |
|
230 | 230 | |
|
231 | 231 | if clkills: |
|
232 | 232 | # preserve bookmarks that repair.strip() would otherwise strip |
|
233 | 233 | op._bookmarksbackup = repo._bookmarks |
|
234 | 234 | |
|
235 | 235 | class dummybmstore(dict): |
|
236 | 236 | def applychanges(self, repo, tr, changes): |
|
237 | 237 | pass |
|
238 | 238 | |
|
239 | 239 | localrepo.localrepository._bookmarks.set(repo, dummybmstore()) |
|
240 | 240 | chgrpfile = repair.strip( |
|
241 | 241 | op.ui, repo, list(clkills), backup=True, topic=b'widen' |
|
242 | 242 | ) |
|
243 | 243 | if chgrpfile: |
|
244 | 244 | op._widen_uninterr = repo.ui.uninterruptible() |
|
245 | 245 | op._widen_uninterr.__enter__() |
|
246 | 246 | # presence of _widen_bundle attribute activates widen handler later |
|
247 | 247 | op._widen_bundle = chgrpfile |
|
248 | 248 | # Set the new narrowspec if we're widening. The setnewnarrowpats() method |
|
249 | 249 | # will currently always be there when using the core+narrowhg server, but |
|
250 | 250 | # other servers may include a changespec part even when not widening (e.g. |
|
251 | 251 | # because we're deepening a shallow repo). |
|
252 | 252 | if util.safehasattr(repo, 'setnewnarrowpats'): |
|
253 | 253 | repo.setnewnarrowpats() |
|
254 | 254 | |
|
255 | 255 | |
|
256 | 256 | def handlechangegroup_widen(op, inpart): |
|
257 | 257 | """Changegroup exchange handler which restores temporarily-stripped nodes""" |
|
258 | 258 | # We saved a bundle with stripped node data we must now restore. |
|
259 | 259 | # This approach is based on mercurial/repair.py@6ee26a53c111. |
|
260 | 260 | repo = op.repo |
|
261 | 261 | ui = op.ui |
|
262 | 262 | |
|
263 | 263 | chgrpfile = op._widen_bundle |
|
264 | 264 | del op._widen_bundle |
|
265 | 265 | vfs = repo.vfs |
|
266 | 266 | |
|
267 | 267 | ui.note(_(b"adding branch\n")) |
|
268 | 268 | f = vfs.open(chgrpfile, b"rb") |
|
269 | 269 | try: |
|
270 | 270 | gen = exchange.readbundle(ui, f, chgrpfile, vfs) |
|
271 | 271 | # silence internal shuffling chatter |
|
272 | 272 | override = {(b'ui', b'quiet'): True} |
|
273 | 273 | if ui.verbose: |
|
274 | 274 | override = {} |
|
275 | 275 | with ui.configoverride(override): |
|
276 | 276 | if isinstance(gen, bundle2.unbundle20): |
|
277 | 277 | with repo.transaction(b'strip') as tr: |
|
278 | 278 | bundle2.processbundle(repo, gen, lambda: tr) |
|
279 | 279 | else: |
|
280 | 280 | gen.apply( |
|
281 | 281 | repo, b'strip', b'bundle:' + vfs.join(chgrpfile), True |
|
282 | 282 | ) |
|
283 | 283 | finally: |
|
284 | 284 | f.close() |
|
285 | 285 | |
|
286 | 286 | # remove undo files |
|
287 | 287 | for undovfs, undofile in repo.undofiles(): |
|
288 | 288 | try: |
|
289 | 289 | undovfs.unlink(undofile) |
|
290 | 290 | except OSError as e: |
|
291 | 291 | if e.errno != errno.ENOENT: |
|
292 | 292 | ui.warn( |
|
293 | 293 | _(b'error removing %s: %s\n') |
|
294 | 294 | % (undovfs.join(undofile), stringutil.forcebytestr(e)) |
|
295 | 295 | ) |
|
296 | 296 | |
|
297 | 297 | # Remove partial backup only if there were no exceptions |
|
298 | 298 | op._widen_uninterr.__exit__(None, None, None) |
|
299 | 299 | vfs.unlink(chgrpfile) |
|
300 | 300 | |
|
301 | 301 | |
|
302 | 302 | def setup(): |
|
303 | 303 | """Enable narrow repo support in bundle2-related extension points.""" |
|
304 | 304 | getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS |
|
305 | 305 | |
|
306 | 306 | getbundleargs[b'narrow'] = b'boolean' |
|
307 | 307 | getbundleargs[b'depth'] = b'plain' |
|
308 | 308 | getbundleargs[b'oldincludepats'] = b'csv' |
|
309 | 309 | getbundleargs[b'oldexcludepats'] = b'csv' |
|
310 | 310 | getbundleargs[b'known'] = b'csv' |
|
311 | 311 | |
|
312 | 312 | # Extend changegroup serving to handle requests from narrow clients. |
|
313 | 313 | origcgfn = exchange.getbundle2partsmapping[b'changegroup'] |
|
314 | 314 | |
|
315 | 315 | def wrappedcgfn(*args, **kwargs): |
|
316 | 316 | repo = args[1] |
|
317 | 317 | if repo.ui.has_section(_NARROWACL_SECTION): |
|
318 | 318 | kwargs = exchange.applynarrowacl(repo, kwargs) |
|
319 | 319 | |
|
320 | 320 | if kwargs.get('narrow', False) and repo.ui.configbool( |
|
321 | 321 | b'experimental', b'narrowservebrokenellipses' |
|
322 | 322 | ): |
|
323 | 323 | getbundlechangegrouppart_narrow(*args, **kwargs) |
|
324 | 324 | else: |
|
325 | 325 | origcgfn(*args, **kwargs) |
|
326 | 326 | |
|
327 | 327 | exchange.getbundle2partsmapping[b'changegroup'] = wrappedcgfn |
|
328 | 328 | |
|
329 | 329 | # Extend changegroup receiver so client can fixup after widen requests. |
|
330 | 330 | origcghandler = bundle2.parthandlermapping[b'changegroup'] |
|
331 | 331 | |
|
332 | 332 | def wrappedcghandler(op, inpart): |
|
333 | 333 | origcghandler(op, inpart) |
|
334 | 334 | if util.safehasattr(op, '_widen_bundle'): |
|
335 | 335 | handlechangegroup_widen(op, inpart) |
|
336 | 336 | if util.safehasattr(op, '_bookmarksbackup'): |
|
337 | 337 | localrepo.localrepository._bookmarks.set( |
|
338 | 338 | op.repo, op._bookmarksbackup |
|
339 | 339 | ) |
|
340 | 340 | del op._bookmarksbackup |
|
341 | 341 | |
|
342 | 342 | wrappedcghandler.params = origcghandler.params |
|
343 | 343 | bundle2.parthandlermapping[b'changegroup'] = wrappedcghandler |
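Both hunks in this file replace a direct requirements-set membership test with a `scmutil` helper. A minimal sketch of such a helper, assuming it merely wraps the old check behind a single name (the real `scmutil.istreemanifest` may differ in detail):

```python
from mercurial import requirements

def istreemanifest(repo):
    """whether the repo stores manifests as trees rather than flat revlogs"""
    return requirements.TREEMANIFEST_REQUIREMENT in repo.requirements
```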
@@ -1,443 +1,443 @@
|
1 | 1 | # remotefilelogserver.py - server logic for a remotefilelog server |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import errno |
|
10 | 10 | import os |
|
11 | 11 | import stat |
|
12 | 12 | import time |
|
13 | 13 | import zlib |
|
14 | 14 | |
|
15 | 15 | from mercurial.i18n import _ |
|
16 | 16 | from mercurial.node import bin, hex, nullid |
|
17 | 17 | from mercurial.pycompat import open |
|
18 | 18 | from mercurial import ( |
|
19 | 19 | changegroup, |
|
20 | 20 | changelog, |
|
21 | 21 | context, |
|
22 | 22 | error, |
|
23 | 23 | extensions, |
|
24 | 24 | match, |
|
25 | 25 | pycompat, |
|
26 | | requirements, |
|
| 26 | scmutil, |
|
27 | 27 | store, |
|
28 | 28 | streamclone, |
|
29 | 29 | util, |
|
30 | 30 | wireprotoserver, |
|
31 | 31 | wireprototypes, |
|
32 | 32 | wireprotov1server, |
|
33 | 33 | ) |
|
34 | 34 | from . import ( |
|
35 | 35 | constants, |
|
36 | 36 | shallowutil, |
|
37 | 37 | ) |
|
38 | 38 | |
|
39 | 39 | _sshv1server = wireprotoserver.sshv1protocolhandler |
|
40 | 40 | |
|
41 | 41 | |
|
42 | 42 | def setupserver(ui, repo): |
|
43 | 43 | """Sets up a normal Mercurial repo so it can serve files to shallow repos. |
|
44 | 44 | """ |
|
45 | 45 | onetimesetup(ui) |
|
46 | 46 | |
|
47 | 47 | # don't send files to shallow clients during pulls |
|
48 | 48 | def generatefiles( |
|
49 | 49 | orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs |
|
50 | 50 | ): |
|
51 | 51 | caps = self._bundlecaps or [] |
|
52 | 52 | if constants.BUNDLE2_CAPABLITY in caps: |
|
53 | 53 | # only send files that don't match the specified patterns |
|
54 | 54 | includepattern = None |
|
55 | 55 | excludepattern = None |
|
56 | 56 | for cap in self._bundlecaps or []: |
|
57 | 57 | if cap.startswith(b"includepattern="): |
|
58 | 58 | includepattern = cap[len(b"includepattern=") :].split(b'\0') |
|
59 | 59 | elif cap.startswith(b"excludepattern="): |
|
60 | 60 | excludepattern = cap[len(b"excludepattern=") :].split(b'\0') |
|
61 | 61 | |
|
62 | 62 | m = match.always() |
|
63 | 63 | if includepattern or excludepattern: |
|
64 | 64 | m = match.match( |
|
65 | 65 | repo.root, b'', None, includepattern, excludepattern |
|
66 | 66 | ) |
|
67 | 67 | |
|
68 | 68 | changedfiles = list([f for f in changedfiles if not m(f)]) |
|
69 | 69 | return orig( |
|
70 | 70 | self, changedfiles, linknodes, commonrevs, source, *args, **kwargs |
|
71 | 71 | ) |
|
72 | 72 | |
|
73 | 73 | extensions.wrapfunction( |
|
74 | 74 | changegroup.cgpacker, b'generatefiles', generatefiles |
|
75 | 75 | ) |
|
76 | 76 | |
|
77 | 77 | |
|
78 | 78 | onetime = False |
|
79 | 79 | |
|
80 | 80 | |
|
81 | 81 | def onetimesetup(ui): |
|
82 | 82 | """Configures the wireprotocol for both clients and servers. |
|
83 | 83 | """ |
|
84 | 84 | global onetime |
|
85 | 85 | if onetime: |
|
86 | 86 | return |
|
87 | 87 | onetime = True |
|
88 | 88 | |
|
89 | 89 | # support file content requests |
|
90 | 90 | wireprotov1server.wireprotocommand( |
|
91 | 91 | b'x_rfl_getflogheads', b'path', permission=b'pull' |
|
92 | 92 | )(getflogheads) |
|
93 | 93 | wireprotov1server.wireprotocommand( |
|
94 | 94 | b'x_rfl_getfiles', b'', permission=b'pull' |
|
95 | 95 | )(getfiles) |
|
96 | 96 | wireprotov1server.wireprotocommand( |
|
97 | 97 | b'x_rfl_getfile', b'file node', permission=b'pull' |
|
98 | 98 | )(getfile) |
|
99 | 99 | |
|
100 | 100 | class streamstate(object): |
|
101 | 101 | match = None |
|
102 | 102 | shallowremote = False |
|
103 | 103 | noflatmf = False |
|
104 | 104 | |
|
105 | 105 | state = streamstate() |
|
106 | 106 | |
|
107 | 107 | def stream_out_shallow(repo, proto, other): |
|
108 | 108 | includepattern = None |
|
109 | 109 | excludepattern = None |
|
110 | 110 | raw = other.get(b'includepattern') |
|
111 | 111 | if raw: |
|
112 | 112 | includepattern = raw.split(b'\0') |
|
113 | 113 | raw = other.get(b'excludepattern') |
|
114 | 114 | if raw: |
|
115 | 115 | excludepattern = raw.split(b'\0') |
|
116 | 116 | |
|
117 | 117 | oldshallow = state.shallowremote |
|
118 | 118 | oldmatch = state.match |
|
119 | 119 | oldnoflatmf = state.noflatmf |
|
120 | 120 | try: |
|
121 | 121 | state.shallowremote = True |
|
122 | 122 | state.match = match.always() |
|
123 | 123 | state.noflatmf = other.get(b'noflatmanifest') == b'True' |
|
124 | 124 | if includepattern or excludepattern: |
|
125 | 125 | state.match = match.match( |
|
126 | 126 | repo.root, b'', None, includepattern, excludepattern |
|
127 | 127 | ) |
|
128 | 128 | streamres = wireprotov1server.stream(repo, proto) |
|
129 | 129 | |
|
130 | 130 | # Force the first value to execute, so the file list is computed |
|
131 | 131 | # within the try/finally scope |
|
132 | 132 | first = next(streamres.gen) |
|
133 | 133 | second = next(streamres.gen) |
|
134 | 134 | |
|
135 | 135 | def gen(): |
|
136 | 136 | yield first |
|
137 | 137 | yield second |
|
138 | 138 | for value in streamres.gen: |
|
139 | 139 | yield value |
|
140 | 140 | |
|
141 | 141 | return wireprototypes.streamres(gen()) |
|
142 | 142 | finally: |
|
143 | 143 | state.shallowremote = oldshallow |
|
144 | 144 | state.match = oldmatch |
|
145 | 145 | state.noflatmf = oldnoflatmf |
|
146 | 146 | |
|
147 | 147 | wireprotov1server.commands[b'stream_out_shallow'] = ( |
|
148 | 148 | stream_out_shallow, |
|
149 | 149 | b'*', |
|
150 | 150 | ) |
|
151 | 151 | |
|
152 | 152 | # don't clone filelogs to shallow clients |
|
153 | 153 | def _walkstreamfiles(orig, repo, matcher=None): |
|
154 | 154 | if state.shallowremote: |
|
155 | 155 | # if we are shallow ourselves, stream our local commits |
|
156 | 156 | if shallowutil.isenabled(repo): |
|
157 | 157 | striplen = len(repo.store.path) + 1 |
|
158 | 158 | readdir = repo.store.rawvfs.readdir |
|
159 | 159 | visit = [os.path.join(repo.store.path, b'data')] |
|
160 | 160 | while visit: |
|
161 | 161 | p = visit.pop() |
|
162 | 162 | for f, kind, st in readdir(p, stat=True): |
|
163 | 163 | fp = p + b'/' + f |
|
164 | 164 | if kind == stat.S_IFREG: |
|
165 | 165 | if not fp.endswith(b'.i') and not fp.endswith( |
|
166 | 166 | b'.d' |
|
167 | 167 | ): |
|
168 | 168 | n = util.pconvert(fp[striplen:]) |
|
169 | 169 | yield (store.decodedir(n), n, st.st_size) |
|
170 | 170 | if kind == stat.S_IFDIR: |
|
171 | 171 | visit.append(fp) |
|
172 | 172 | |
|
173 | | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: |
|
| 173 | if scmutil.istreemanifest(repo): |
|
174 | 174 | for (u, e, s) in repo.store.datafiles(): |
|
175 | 175 | if u.startswith(b'meta/') and ( |
|
176 | 176 | u.endswith(b'.i') or u.endswith(b'.d') |
|
177 | 177 | ): |
|
178 | 178 | yield (u, e, s) |
|
179 | 179 | |
|
180 | 180 | # Return .d and .i files that do not match the shallow pattern |
|
181 | 181 | match = state.match |
|
182 | 182 | if match and not match.always(): |
|
183 | 183 | for (u, e, s) in repo.store.datafiles(): |
|
184 | 184 | f = u[5:-2] # trim data/... and .i/.d |
|
185 | 185 | if not state.match(f): |
|
186 | 186 | yield (u, e, s) |
|
187 | 187 | |
|
188 | 188 | for x in repo.store.topfiles(): |
|
189 | 189 | if state.noflatmf and x[0][:11] == b'00manifest.': |
|
190 | 190 | continue |
|
191 | 191 | yield x |
|
192 | 192 | |
|
193 | 193 | elif shallowutil.isenabled(repo): |
|
194 | 194 | # don't allow cloning from a shallow repo to a full repo |
|
195 | 195 | # since it would require fetching every version of every |
|
196 | 196 | # file in order to create the revlogs. |
|
197 | 197 | raise error.Abort( |
|
198 | 198 | _(b"Cannot clone from a shallow repo to a full repo.") |
|
199 | 199 | ) |
|
200 | 200 | else: |
|
201 | 201 | for x in orig(repo, matcher): |
|
202 | 202 | yield x |
|
203 | 203 | |
|
204 | 204 | extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles) |
|
205 | 205 | |
|
206 | 206 | # expose remotefilelog capabilities |
|
207 | 207 | def _capabilities(orig, repo, proto): |
|
208 | 208 | caps = orig(repo, proto) |
|
209 | 209 | if shallowutil.isenabled(repo) or ui.configbool( |
|
210 | 210 | b'remotefilelog', b'server' |
|
211 | 211 | ): |
|
212 | 212 | if isinstance(proto, _sshv1server): |
|
213 | 213 | # legacy getfiles method which only works over ssh |
|
214 | 214 | caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES) |
|
215 | 215 | caps.append(b'x_rfl_getflogheads') |
|
216 | 216 | caps.append(b'x_rfl_getfile') |
|
217 | 217 | return caps |
|
218 | 218 | |
|
219 | 219 | extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities) |
|
220 | 220 | |
|
221 | 221 | def _adjustlinkrev(orig, self, *args, **kwargs): |
|
222 | 222 | # When generating file blobs, taking the real path is too slow on large |
|
223 | 223 | # repos, so force it to just return the linkrev directly. |
|
224 | 224 | repo = self._repo |
|
225 | 225 | if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev: |
|
226 | 226 | return self._filelog.linkrev(self._filelog.rev(self._filenode)) |
|
227 | 227 | return orig(self, *args, **kwargs) |
|
228 | 228 | |
|
229 | 229 | extensions.wrapfunction( |
|
230 | 230 | context.basefilectx, b'_adjustlinkrev', _adjustlinkrev |
|
231 | 231 | ) |
|
232 | 232 | |
|
233 | 233 | def _iscmd(orig, cmd): |
|
234 | 234 | if cmd == b'x_rfl_getfiles': |
|
235 | 235 | return False |
|
236 | 236 | return orig(cmd) |
|
237 | 237 | |
|
238 | 238 | extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd) |
|
239 | 239 | |
|
240 | 240 | |
|
241 | 241 | def _loadfileblob(repo, cachepath, path, node): |
|
242 | 242 | filecachepath = os.path.join(cachepath, path, hex(node)) |
|
243 | 243 | if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0: |
|
244 | 244 | filectx = repo.filectx(path, fileid=node) |
|
245 | 245 | if filectx.node() == nullid: |
|
246 | 246 | repo.changelog = changelog.changelog(repo.svfs) |
|
247 | 247 | filectx = repo.filectx(path, fileid=node) |
|
248 | 248 | |
|
249 | 249 | text = createfileblob(filectx) |
|
250 | 250 | # TODO configurable compression engines |
|
251 | 251 | text = zlib.compress(text) |
|
252 | 252 | |
|
253 | 253 | # everything should be user & group read/writable |
|
254 | 254 | oldumask = os.umask(0o002) |
|
255 | 255 | try: |
|
256 | 256 | dirname = os.path.dirname(filecachepath) |
|
257 | 257 | if not os.path.exists(dirname): |
|
258 | 258 | try: |
|
259 | 259 | os.makedirs(dirname) |
|
260 | 260 | except OSError as ex: |
|
261 | 261 | if ex.errno != errno.EEXIST: |
|
262 | 262 | raise |
|
263 | 263 | |
|
264 | 264 | f = None |
|
265 | 265 | try: |
|
266 | 266 | f = util.atomictempfile(filecachepath, b"wb") |
|
267 | 267 | f.write(text) |
|
268 | 268 | except (IOError, OSError): |
|
269 | 269 | # Don't abort if the user only has permission to read, |
|
270 | 270 | # and not write. |
|
271 | 271 | pass |
|
272 | 272 | finally: |
|
273 | 273 | if f: |
|
274 | 274 | f.close() |
|
275 | 275 | finally: |
|
276 | 276 | os.umask(oldumask) |
|
277 | 277 | else: |
|
278 | 278 | with open(filecachepath, b"rb") as f: |
|
279 | 279 | text = f.read() |
|
280 | 280 | return text |
|
281 | 281 | |
|
282 | 282 | |
|
283 | 283 | def getflogheads(repo, proto, path): |
|
284 | 284 | """A server api for requesting a filelog's heads |
|
285 | 285 | """ |
|
286 | 286 | flog = repo.file(path) |
|
287 | 287 | heads = flog.heads() |
|
288 | 288 | return b'\n'.join((hex(head) for head in heads if head != nullid)) |
|
289 | 289 | |
|
290 | 290 | |
|
291 | 291 | def getfile(repo, proto, file, node): |
|
292 | 292 | """A server api for requesting a particular version of a file. Can be used |
|
293 | 293 | in batches to request many files at once. The return protocol is: |
|
294 | 294 | <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or |
|
295 | 295 | non-zero for an error. |
|
296 | 296 | |
|
297 | 297 | data is a compressed blob with revlog flag and ancestors information. See |
|
298 | 298 | createfileblob for its content. |
|
299 | 299 | """ |
|
300 | 300 | if shallowutil.isenabled(repo): |
|
301 | 301 | return b'1\0' + _(b'cannot fetch remote files from shallow repo') |
|
302 | 302 | cachepath = repo.ui.config(b"remotefilelog", b"servercachepath") |
|
303 | 303 | if not cachepath: |
|
304 | 304 | cachepath = os.path.join(repo.path, b"remotefilelogcache") |
|
305 | 305 | node = bin(node.strip()) |
|
306 | 306 | if node == nullid: |
|
307 | 307 | return b'0\0' |
|
308 | 308 | return b'0\0' + _loadfileblob(repo, cachepath, file, node) |
|
309 | 309 | |
|
310 | 310 | |
|
311 | 311 | def getfiles(repo, proto): |
|
312 | 312 | """A server api for requesting particular versions of particular files. |
|
313 | 313 | """ |
|
314 | 314 | if shallowutil.isenabled(repo): |
|
315 | 315 | raise error.Abort(_(b'cannot fetch remote files from shallow repo')) |
|
316 | 316 | if not isinstance(proto, _sshv1server): |
|
317 | 317 | raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol')) |
|
318 | 318 | |
|
319 | 319 | def streamer(): |
|
320 | 320 | fin = proto._fin |
|
321 | 321 | |
|
322 | 322 | cachepath = repo.ui.config(b"remotefilelog", b"servercachepath") |
|
323 | 323 | if not cachepath: |
|
324 | 324 | cachepath = os.path.join(repo.path, b"remotefilelogcache") |
|
325 | 325 | |
|
326 | 326 | while True: |
|
327 | 327 | request = fin.readline()[:-1] |
|
328 | 328 | if not request: |
|
329 | 329 | break |
|
330 | 330 | |
|
331 | 331 | node = bin(request[:40]) |
|
332 | 332 | if node == nullid: |
|
333 | 333 | yield b'0\n' |
|
334 | 334 | continue |
|
335 | 335 | |
|
336 | 336 | path = request[40:] |
|
337 | 337 | |
|
338 | 338 | text = _loadfileblob(repo, cachepath, path, node) |
|
339 | 339 | |
|
340 | 340 | yield b'%d\n%s' % (len(text), text) |
|
341 | 341 | |
|
342 | 342 | # it would be better to only flush after processing a whole batch |
|
343 | 343 | # but currently we don't know if there are more requests coming |
|
344 | 344 | proto._fout.flush() |
|
345 | 345 | |
|
346 | 346 | return wireprototypes.streamres(streamer()) |
|
347 | 347 | |
|
348 | 348 | |
|
349 | 349 | def createfileblob(filectx): |
|
350 | 350 | """ |
|
351 | 351 | format: |
|
352 | 352 | v0: |
|
353 | 353 | str(len(rawtext)) + '\0' + rawtext + ancestortext |
|
354 | 354 | v1: |
|
355 | 355 | 'v1' + '\n' + metalist + '\0' + rawtext + ancestortext |
|
356 | 356 | metalist := metalist + '\n' + meta | meta |
|
357 | 357 | meta := sizemeta | flagmeta |
|
358 | 358 | sizemeta := METAKEYSIZE + str(len(rawtext)) |
|
359 | 359 | flagmeta := METAKEYFLAG + str(flag) |
|
360 | 360 | |
|
361 | 361 | note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a |
|
362 | 362 | length of 1. |
|
363 | 363 | """ |
|
364 | 364 | flog = filectx.filelog() |
|
365 | 365 | frev = filectx.filerev() |
|
366 | 366 | revlogflags = flog._revlog.flags(frev) |
|
367 | 367 | if revlogflags == 0: |
|
368 | 368 | # normal files |
|
369 | 369 | text = filectx.data() |
|
370 | 370 | else: |
|
371 | 371 | # lfs, read raw revision data |
|
372 | 372 | text = flog.rawdata(frev) |
|
373 | 373 | |
|
374 | 374 | repo = filectx._repo |
|
375 | 375 | |
|
376 | 376 | ancestors = [filectx] |
|
377 | 377 | |
|
378 | 378 | try: |
|
379 | 379 | repo.forcelinkrev = True |
|
380 | 380 | ancestors.extend([f for f in filectx.ancestors()]) |
|
381 | 381 | |
|
382 | 382 | ancestortext = b"" |
|
383 | 383 | for ancestorctx in ancestors: |
|
384 | 384 | parents = ancestorctx.parents() |
|
385 | 385 | p1 = nullid |
|
386 | 386 | p2 = nullid |
|
387 | 387 | if len(parents) > 0: |
|
388 | 388 | p1 = parents[0].filenode() |
|
389 | 389 | if len(parents) > 1: |
|
390 | 390 | p2 = parents[1].filenode() |
|
391 | 391 | |
|
392 | 392 | copyname = b"" |
|
393 | 393 | rename = ancestorctx.renamed() |
|
394 | 394 | if rename: |
|
395 | 395 | copyname = rename[0] |
|
396 | 396 | linknode = ancestorctx.node() |
|
397 | 397 | ancestortext += b"%s%s%s%s%s\0" % ( |
|
398 | 398 | ancestorctx.filenode(), |
|
399 | 399 | p1, |
|
400 | 400 | p2, |
|
401 | 401 | linknode, |
|
402 | 402 | copyname, |
|
403 | 403 | ) |
|
404 | 404 | finally: |
|
405 | 405 | repo.forcelinkrev = False |
|
406 | 406 | |
|
407 | 407 | header = shallowutil.buildfileblobheader(len(text), revlogflags) |
|
408 | 408 | |
|
409 | 409 | return b"%s\0%s%s" % (header, text, ancestortext) |
|
410 | 410 | |
|
411 | 411 | |
|
412 | 412 | def gcserver(ui, repo): |
|
413 | 413 | if not repo.ui.configbool(b"remotefilelog", b"server"): |
|
414 | 414 | return |
|
415 | 415 | |
|
416 | 416 | neededfiles = set() |
|
417 | 417 | heads = repo.revs(b"heads(tip~25000:) - null") |
|
418 | 418 | |
|
419 | 419 | cachepath = repo.vfs.join(b"remotefilelogcache") |
|
420 | 420 | for head in heads: |
|
421 | 421 | mf = repo[head].manifest() |
|
422 | 422 | for filename, filenode in pycompat.iteritems(mf): |
|
423 | 423 | filecachepath = os.path.join(cachepath, filename, hex(filenode)) |
|
424 | 424 | neededfiles.add(filecachepath) |
|
425 | 425 | |
|
426 | 426 | # delete unneeded older files |
|
427 | 427 | days = repo.ui.configint(b"remotefilelog", b"serverexpiration") |
|
428 | 428 | expiration = time.time() - (days * 24 * 60 * 60) |
|
429 | 429 | |
|
430 | 430 | progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files") |
|
431 | 431 | progress.update(0) |
|
432 | 432 | for root, dirs, files in os.walk(cachepath): |
|
433 | 433 | for file in files: |
|
434 | 434 | filepath = os.path.join(root, file) |
|
435 | 435 | progress.increment() |
|
436 | 436 | if filepath in neededfiles: |
|
437 | 437 | continue |
|
438 | 438 | |
|
439 | 439 | stat = os.stat(filepath) |
|
440 | 440 | if stat.st_mtime < expiration: |
|
441 | 441 | os.remove(filepath) |
|
442 | 442 | |
|
443 | 443 | progress.complete() |
@@ -1,2585 +1,2582 @@
|
1 | 1 | # bundle2.py - generic container format to transmit arbitrary data. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """Handling of the new bundle2 format |
|
8 | 8 | |
|
9 | 9 | The goal of bundle2 is to act as an atomic packet to transmit a set of |
|
10 | 10 | payloads in an application-agnostic way. It consists of a sequence of "parts" |
|
11 | 11 | that will be handed to and processed by the application layer. |
|
12 | 12 | |
|
13 | 13 | |
|
14 | 14 | General format architecture |
|
15 | 15 | =========================== |
|
16 | 16 | |
|
17 | 17 | The format is structured as follows |
|
18 | 18 | |
|
19 | 19 | - magic string |
|
20 | 20 | - stream level parameters |
|
21 | 21 | - payload parts (any number) |
|
22 | 22 | - end of stream marker. |
|
23 | 23 | |
|
24 | 24 | The binary format |
|
25 | 25 | ============================ |
|
26 | 26 | |
|
27 | 27 | All numbers are unsigned and big-endian. |
|
28 | 28 | |
|
29 | 29 | stream level parameters |
|
30 | 30 | ------------------------ |
|
31 | 31 | |
|
32 | 32 | The binary format is as follows |
|
33 | 33 | |
|
34 | 34 | :params size: int32 |
|
35 | 35 | |
|
36 | 36 | The total number of Bytes used by the parameters |
|
37 | 37 | |
|
38 | 38 | :params value: arbitrary number of Bytes |
|
39 | 39 | |
|
40 | 40 | A blob of `params size` containing the serialized version of all stream level |
|
41 | 41 | parameters. |
|
42 | 42 | |
|
43 | 43 | The blob contains a space separated list of parameters. Parameters with value |
|
44 | 44 | are stored in the form `<name>=<value>`. Both name and value are urlquoted. |
|
45 | 45 | |
|
46 | 46 | Empty names are forbidden. |
|
47 | 47 | |
|
48 | 48 | Names MUST start with a letter. If the first letter is lower case, the |
|
49 | 49 | parameter is advisory and can be safely ignored. However, when the first |
|
50 | 50 | letter is capital, the parameter is mandatory and the bundling process MUST |
|
51 | 51 | stop if it is not able to process it. |
|
52 | 52 | |
|
53 | 53 | Stream parameters use a simple textual format for two main reasons: |
|
54 | 54 | |
|
55 | 55 | - Stream level parameters should remain simple and we want to discourage any |
|
56 | 56 | crazy usage. |
|
57 | 57 | - Textual data allow easy human inspection of a bundle2 header in case of |
|
58 | 58 | troubles. |
|
59 | 59 | |
|
60 | 60 | Any application-level options MUST go into a bundle2 part instead. |
|
61 | 61 | |
|
62 | 62 | Payload part |
|
63 | 63 | ------------------------ |
|
64 | 64 | |
|
65 | 65 | The binary format is as follows |
|
66 | 66 | |
|
67 | 67 | :header size: int32 |
|
68 | 68 | |
|
69 | 69 | The total number of Bytes used by the part header. When the header is empty |
|
70 | 70 | (size = 0) this is interpreted as the end of stream marker. |
|
71 | 71 | |
|
72 | 72 | :header: |
|
73 | 73 | |
|
74 | 74 | The header defines how to interpret the part. It contains two pieces of |
|
75 | 75 | data: the part type, and the part parameters. |
|
76 | 76 | |
|
77 | 77 | The part type is used to route to an application-level handler that can |
|
78 | 78 | interpret the payload. |
|
79 | 79 | |
|
80 | 80 | Part parameters are passed to the application level handler. They are |
|
81 | 81 | meant to convey information that will help the application level object to |
|
82 | 82 | interpret the part payload. |
|
83 | 83 | |
|
84 | 84 | The binary format of the header is as follows |
|
85 | 85 | |
|
86 | 86 | :typesize: (one byte) |
|
87 | 87 | |
|
88 | 88 | :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*) |
|
89 | 89 | |
|
90 | 90 | :partid: A 32-bit integer (unique in the bundle) that can be used to refer |
|
91 | 91 | to this part. |
|
92 | 92 | |
|
93 | 93 | :parameters: |
|
94 | 94 | |
|
95 | 95 | A part's parameters may have arbitrary content; the binary structure is:: |
|
96 | 96 | |
|
97 | 97 | <mandatory-count><advisory-count><param-sizes><param-data> |
|
98 | 98 | |
|
99 | 99 | :mandatory-count: 1 byte, number of mandatory parameters |
|
100 | 100 | |
|
101 | 101 | :advisory-count: 1 byte, number of advisory parameters |
|
102 | 102 | |
|
103 | 103 | :param-sizes: |
|
104 | 104 | |
|
105 | 105 | N pairs of bytes, where N is the total number of parameters. Each |
|
106 | 106 | pair contains (<size-of-key>, <size-of-value>) for one parameter. |
|
107 | 107 | |
|
108 | 108 | :param-data: |
|
109 | 109 | |
|
110 | 110 | A blob of bytes from which each parameter key and value can be |
|
111 | 111 | retrieved using the list of size couples stored in the previous |
|
112 | 112 | field. |
|
113 | 113 | |
|
114 | 114 | Mandatory parameters come first, then the advisory ones. |
|
115 | 115 | |
|
116 | 116 | Each parameter's key MUST be unique within the part. |
|
117 | 117 | |
|
118 | 118 | :payload: |
|
119 | 119 | |
|
120 | 120 | payload is a series of `<chunksize><chunkdata>`. |
|
121 | 121 | |
|
122 | 122 | `chunksize` is an int32, `chunkdata` is plain bytes (as many as |
|
123 | 123 | `chunksize` says). The payload part is concluded by a zero-size chunk. |
|
124 | 124 | |
|
125 | 125 | The current implementation always produces either zero or one chunk. |
|
126 | 126 | This is an implementation limitation that will ultimately be lifted. |
|
127 | 127 | |
|
128 | 128 | `chunksize` can be negative to trigger special case processing. No such |
|
129 | 129 | processing is in place yet. |
|
130 | 130 | |
|
131 | 131 | Bundle processing |
|
132 | 132 | ============================ |
|
133 | 133 | |
|
134 | 134 | Each part is processed in order using a "part handler". Handlers are registered |
|
135 | 135 | for a certain part type. |
|
136 | 136 | |
|
137 | 137 | The matching of a part to its handler is case insensitive. The case of the |
|
138 | 138 | part type is used to know if a part is mandatory or advisory. If the Part type |
|
139 | 139 | contains any uppercase char it is considered mandatory. When no handler is |
|
140 | 140 | known for a Mandatory part, the process is aborted and an exception is raised. |
|
141 | 141 | If the part is advisory and no handler is known, the part is ignored. When the |
|
142 | 142 | process is aborted, the full bundle is still read from the stream to keep the |
|
143 | 143 | channel usable. But none of the parts read after an abort are processed. In the |
|
144 | 144 | future, dropping the stream may become an option for channels we do not care to |
|
145 | 145 | preserve. |
|
146 | 146 | """ |
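As a concrete illustration of the stream-level layout just described (magic string, int32 parameter-blob size, urlquoted space-separated parameters), a hand-rolled sketch that does not use the bundle20 class defined below:

```python
import struct
from urllib.parse import quote

def makestreamheader(params):
    # params: list of (name, value) pairs; value may be None. A name whose
    # first letter is capital marks the parameter as mandatory.
    pieces = []
    for name, value in params:
        piece = quote(name)
        if value is not None:
            piece += '=' + quote(value)
        pieces.append(piece)
    blob = ' '.join(pieces).encode('ascii')
    # magic string, then int32 size of the parameter blob, then the blob
    return b'HG20' + struct.pack('>i', len(blob)) + blob

header = makestreamheader([('Compression', 'BZ')])
# b'HG20' + b'\x00\x00\x00\x0e' + b'Compression=BZ'
```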
|
147 | 147 | |
|
148 | 148 | from __future__ import absolute_import, division |
|
149 | 149 | |
|
150 | 150 | import collections |
|
151 | 151 | import errno |
|
152 | 152 | import os |
|
153 | 153 | import re |
|
154 | 154 | import string |
|
155 | 155 | import struct |
|
156 | 156 | import sys |
|
157 | 157 | |
|
158 | 158 | from .i18n import _ |
|
159 | 159 | from . import ( |
|
160 | 160 | bookmarks, |
|
161 | 161 | changegroup, |
|
162 | 162 | encoding, |
|
163 | 163 | error, |
|
164 | 164 | node as nodemod, |
|
165 | 165 | obsolete, |
|
166 | 166 | phases, |
|
167 | 167 | pushkey, |
|
168 | 168 | pycompat, |
|
169 | 169 | requirements, |
|
170 | 170 | scmutil, |
|
171 | 171 | streamclone, |
|
172 | 172 | tags, |
|
173 | 173 | url, |
|
174 | 174 | util, |
|
175 | 175 | ) |
|
176 | 176 | from .utils import stringutil |
|
177 | 177 | |
|
178 | 178 | urlerr = util.urlerr |
|
179 | 179 | urlreq = util.urlreq |
|
180 | 180 | |
|
181 | 181 | _pack = struct.pack |
|
182 | 182 | _unpack = struct.unpack |
|
183 | 183 | |
|
184 | 184 | _fstreamparamsize = b'>i' |
|
185 | 185 | _fpartheadersize = b'>i' |
|
186 | 186 | _fparttypesize = b'>B' |
|
187 | 187 | _fpartid = b'>I' |
|
188 | 188 | _fpayloadsize = b'>i' |
|
189 | 189 | _fpartparamcount = b'>BB' |
|
190 | 190 | |
|
191 | 191 | preferedchunksize = 32768 |
|
192 | 192 | |
|
193 | 193 | _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]') |
|
194 | 194 | |
|
195 | 195 | |
|
196 | 196 | def outdebug(ui, message): |
|
197 | 197 | """debug regarding output stream (bundling)""" |
|
198 | 198 | if ui.configbool(b'devel', b'bundle2.debug'): |
|
199 | 199 | ui.debug(b'bundle2-output: %s\n' % message) |
|
200 | 200 | |
|
201 | 201 | |
|
202 | 202 | def indebug(ui, message): |
|
203 | 203 | """debug on input stream (unbundling)""" |
|
204 | 204 | if ui.configbool(b'devel', b'bundle2.debug'): |
|
205 | 205 | ui.debug(b'bundle2-input: %s\n' % message) |
|
206 | 206 | |
|
207 | 207 | |
|
208 | 208 | def validateparttype(parttype): |
|
209 | 209 | """raise ValueError if a parttype contains invalid character""" |
|
210 | 210 | if _parttypeforbidden.search(parttype): |
|
211 | 211 | raise ValueError(parttype) |
|
212 | 212 | |
|
213 | 213 | |
|
214 | 214 | def _makefpartparamsizes(nbparams): |
|
215 | 215 | """return a struct format to read part parameter sizes |
|
216 | 216 | |
|
217 | 217 | The number of parameters is variable, so we need to build that format |
|
218 | 218 | dynamically. |
|
219 | 219 | """ |
|
220 | 220 | return b'>' + (b'BB' * nbparams) |
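A small worked example of this dynamic format, together with the `<mandatory-count><advisory-count><param-sizes>` layout from the module docstring (the byte values are invented for illustration):

```python
import struct

# Two mandatory parameters with key/value sizes (3, 5) and (4, 0).
data = struct.pack(b'>BB', 2, 0) + struct.pack(b'>BBBB', 3, 5, 4, 0)

mancount, advcount = struct.unpack(b'>BB', data[:2])
fmt = b'>' + b'BB' * (mancount + advcount)  # what _makefpartparamsizes builds
sizes = struct.unpack(fmt, data[2:2 + struct.calcsize(fmt)])
pairs = list(zip(sizes[0::2], sizes[1::2]))  # [(3, 5), (4, 0)]
```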
|
221 | 221 | |
|
222 | 222 | |
|
223 | 223 | parthandlermapping = {} |
|
224 | 224 | |
|
225 | 225 | |
|
226 | 226 | def parthandler(parttype, params=()): |
|
227 | 227 | """decorator that registers a function as a bundle2 part handler |
|
228 | 228 | |
|
229 | 229 | eg:: |
|
230 | 230 | |
|
231 | 231 | @parthandler('myparttype', ('mandatory', 'param', 'handled')) |
|
232 | 232 | def myparttypehandler(...): |
|
233 | 233 | '''process a part of type "my part".''' |
|
234 | 234 | ... |
|
235 | 235 | """ |
|
236 | 236 | validateparttype(parttype) |
|
237 | 237 | |
|
238 | 238 | def _decorator(func): |
|
239 | 239 | lparttype = parttype.lower() # enforce lower case matching. |
|
240 | 240 | assert lparttype not in parthandlermapping |
|
241 | 241 | parthandlermapping[lparttype] = func |
|
242 | 242 | func.params = frozenset(params) |
|
243 | 243 | return func |
|
244 | 244 | |
|
245 | 245 | return _decorator |
|
246 | 246 | |
|
247 | 247 | |
|
248 | 248 | class unbundlerecords(object): |
|
249 | 249 | """keep a record of what happens during an unbundle |
|
250 | 250 | |
|
251 | 251 | New records are added using `records.add('cat', obj)`, where 'cat' is a |
|
252 | 252 | category of record and obj is an arbitrary object. |
|
253 | 253 | |
|
254 | 254 | `records['cat']` will return all entries of this category 'cat'. |
|
255 | 255 | |
|
256 | 256 | Iterating on the object itself will yield `('category', obj)` tuples |
|
257 | 257 | for all entries. |
|
258 | 258 | |
|
259 | 259 | All iterations happen in chronological order. |
|
260 | 260 | """ |
|
261 | 261 | |
|
262 | 262 | def __init__(self): |
|
263 | 263 | self._categories = {} |
|
264 | 264 | self._sequences = [] |
|
265 | 265 | self._replies = {} |
|
266 | 266 | |
|
267 | 267 | def add(self, category, entry, inreplyto=None): |
|
268 | 268 | """add a new record of a given category. |
|
269 | 269 | |
|
270 | 270 | The entry can then be retrieved in the list returned by |
|
271 | 271 | self['category'].""" |
|
272 | 272 | self._categories.setdefault(category, []).append(entry) |
|
273 | 273 | self._sequences.append((category, entry)) |
|
274 | 274 | if inreplyto is not None: |
|
275 | 275 | self.getreplies(inreplyto).add(category, entry) |
|
276 | 276 | |
|
277 | 277 | def getreplies(self, partid): |
|
278 | 278 | """get the records that are replies to a specific part""" |
|
279 | 279 | return self._replies.setdefault(partid, unbundlerecords()) |
|
280 | 280 | |
|
281 | 281 | def __getitem__(self, cat): |
|
282 | 282 | return tuple(self._categories.get(cat, ())) |
|
283 | 283 | |
|
284 | 284 | def __iter__(self): |
|
285 | 285 | return iter(self._sequences) |
|
286 | 286 | |
|
287 | 287 | def __len__(self): |
|
288 | 288 | return len(self._sequences) |
|
289 | 289 | |
|
290 | 290 | def __nonzero__(self): |
|
291 | 291 | return bool(self._sequences) |
|
292 | 292 | |
|
293 | 293 | __bool__ = __nonzero__ |
|
294 | 294 | |
|
295 | 295 | |
|
296 | 296 | class bundleoperation(object): |
|
297 | 297 | """an object that represents a single bundling process |
|
298 | 298 | |
|
299 | 299 | Its purpose is to carry unbundle-related objects and states. |
|
300 | 300 | |
|
301 | 301 | A new object should be created at the beginning of each bundle processing. |
|
302 | 302 | The object is to be returned by the processing function. |
|
303 | 303 | |
|
304 | 304 | The object has very little content now; it will ultimately contain: |
|
305 | 305 | * an access to the repo the bundle is applied to, |
|
306 | 306 | * a ui object, |
|
307 | 307 | * a way to retrieve a transaction to add changes to the repo, |
|
308 | 308 | * a way to record the result of processing each part, |
|
309 | 309 | * a way to construct a bundle response when applicable. |
|
310 | 310 | """ |
|
311 | 311 | |
|
312 | 312 | def __init__(self, repo, transactiongetter, captureoutput=True, source=b''): |
|
313 | 313 | self.repo = repo |
|
314 | 314 | self.ui = repo.ui |
|
315 | 315 | self.records = unbundlerecords() |
|
316 | 316 | self.reply = None |
|
317 | 317 | self.captureoutput = captureoutput |
|
318 | 318 | self.hookargs = {} |
|
319 | 319 | self._gettransaction = transactiongetter |
|
320 | 320 | # carries value that can modify part behavior |
|
321 | 321 | self.modes = {} |
|
322 | 322 | self.source = source |
|
323 | 323 | |
|
324 | 324 | def gettransaction(self): |
|
325 | 325 | transaction = self._gettransaction() |
|
326 | 326 | |
|
327 | 327 | if self.hookargs: |
|
328 | 328 | # the ones added to the transaction supersede those added |
|
329 | 329 | # to the operation. |
|
330 | 330 | self.hookargs.update(transaction.hookargs) |
|
331 | 331 | transaction.hookargs = self.hookargs |
|
332 | 332 | |
|
333 | 333 | # mark the hookargs as flushed. further attempts to add to |
|
334 | 334 | # hookargs will result in an abort. |
|
335 | 335 | self.hookargs = None |
|
336 | 336 | |
|
337 | 337 | return transaction |
|
338 | 338 | |
|
339 | 339 | def addhookargs(self, hookargs): |
|
340 | 340 | if self.hookargs is None: |
|
341 | 341 | raise error.ProgrammingError( |
|
342 | 342 | b'attempted to add hookargs to ' |
|
343 | 343 | b'operation after transaction started' |
|
344 | 344 | ) |
|
345 | 345 | self.hookargs.update(hookargs) |
|
346 | 346 | |
|
347 | 347 | |
|
348 | 348 | class TransactionUnavailable(RuntimeError): |
|
349 | 349 | pass |
|
350 | 350 | |
|
351 | 351 | |
|
352 | 352 | def _notransaction(): |
|
353 | 353 | """default method to get a transaction while processing a bundle |
|
354 | 354 | |
|
355 | 355 | Raise an exception to highlight the fact that no transaction was expected |
|
356 | 356 | to be created""" |
|
357 | 357 | raise TransactionUnavailable() |
|
358 | 358 | |
|
359 | 359 | |
|
360 | 360 | def applybundle(repo, unbundler, tr, source, url=None, **kwargs): |
|
361 | 361 | # transform me into unbundler.apply() as soon as the freeze is lifted |
|
362 | 362 | if isinstance(unbundler, unbundle20): |
|
363 | 363 | tr.hookargs[b'bundle2'] = b'1' |
|
364 | 364 | if source is not None and b'source' not in tr.hookargs: |
|
365 | 365 | tr.hookargs[b'source'] = source |
|
366 | 366 | if url is not None and b'url' not in tr.hookargs: |
|
367 | 367 | tr.hookargs[b'url'] = url |
|
368 | 368 | return processbundle(repo, unbundler, lambda: tr, source=source) |
|
369 | 369 | else: |
|
370 | 370 | # the transactiongetter won't be used, but we might as well set it |
|
371 | 371 | op = bundleoperation(repo, lambda: tr, source=source) |
|
372 | 372 | _processchangegroup(op, unbundler, tr, source, url, **kwargs) |
|
373 | 373 | return op |
|
374 | 374 | |
|
375 | 375 | |
|
376 | 376 | class partiterator(object): |
|
377 | 377 | def __init__(self, repo, op, unbundler): |
|
378 | 378 | self.repo = repo |
|
379 | 379 | self.op = op |
|
380 | 380 | self.unbundler = unbundler |
|
381 | 381 | self.iterator = None |
|
382 | 382 | self.count = 0 |
|
383 | 383 | self.current = None |
|
384 | 384 | |
|
385 | 385 | def __enter__(self): |
|
386 | 386 | def func(): |
|
387 | 387 | itr = enumerate(self.unbundler.iterparts(), 1) |
|
388 | 388 | for count, p in itr: |
|
389 | 389 | self.count = count |
|
390 | 390 | self.current = p |
|
391 | 391 | yield p |
|
392 | 392 | p.consume() |
|
393 | 393 | self.current = None |
|
394 | 394 | |
|
395 | 395 | self.iterator = func() |
|
396 | 396 | return self.iterator |
|
397 | 397 | |
|
398 | 398 | def __exit__(self, type, exc, tb): |
|
399 | 399 | if not self.iterator: |
|
400 | 400 | return |
|
401 | 401 | |
|
402 | 402 | # Only gracefully abort in a normal exception situation. User aborts |
|
403 | 403 | # like Ctrl+C throw a KeyboardInterrupt which is not a base Exception, |
|
404 | 404 | # and should not gracefully cleanup. |
|
405 | 405 | if isinstance(exc, Exception): |
|
406 | 406 | # Any exceptions seeking to the end of the bundle at this point are |
|
407 | 407 | # almost certainly related to the underlying stream being bad. |
|
408 | 408 | # And, chances are that the exception we're handling is related to |
|
409 | 409 | # getting in that bad state. So, we swallow the seeking error and |
|
410 | 410 | # re-raise the original error. |
|
411 | 411 | seekerror = False |
|
412 | 412 | try: |
|
413 | 413 | if self.current: |
|
414 | 414 | # consume the part content to not corrupt the stream. |
|
415 | 415 | self.current.consume() |
|
416 | 416 | |
|
417 | 417 | for part in self.iterator: |
|
418 | 418 | # consume the bundle content |
|
419 | 419 | part.consume() |
|
420 | 420 | except Exception: |
|
421 | 421 | seekerror = True |
|
422 | 422 | |
|
423 | 423 | # Small hack to let caller code distinguish exceptions from bundle2 |
|
424 | 424 | # processing from processing the old format. This is mostly needed |
|
425 | 425 | # to handle different return codes to unbundle according to the type |
|
426 | 426 | # of bundle. We should probably clean up or drop this return code |
|
427 | 427 | # craziness in a future version. |
|
428 | 428 | exc.duringunbundle2 = True |
|
429 | 429 | salvaged = [] |
|
430 | 430 | replycaps = None |
|
431 | 431 | if self.op.reply is not None: |
|
432 | 432 | salvaged = self.op.reply.salvageoutput() |
|
433 | 433 | replycaps = self.op.reply.capabilities |
|
434 | 434 | exc._replycaps = replycaps |
|
435 | 435 | exc._bundle2salvagedoutput = salvaged |
|
436 | 436 | |
|
437 | 437 | # Re-raising from a variable loses the original stack. So only use |
|
438 | 438 | # that form if we need to. |
|
439 | 439 | if seekerror: |
|
440 | 440 | raise exc |
|
441 | 441 | |
|
442 | 442 | self.repo.ui.debug( |
|
443 | 443 | b'bundle2-input-bundle: %i parts total\n' % self.count |
|
444 | 444 | ) |
|
445 | 445 | |
|
446 | 446 | |
|
447 | 447 | def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''): |
|
448 | 448 | """This function process a bundle, apply effect to/from a repo |
|
449 | 449 | |
|
450 | 450 | It iterates over each part then searches for and uses the proper handling |
|
451 | 451 | code to process the part. Parts are processed in order. |
|
452 | 452 | |
|
453 | 453 | Unknown Mandatory part will abort the process. |
|
454 | 454 | |
|
455 | 455 | It is temporarily possible to provide a prebuilt bundleoperation to the |
|
456 | 456 | function. This is used to ensure output is properly propagated in case of |
|
457 | 457 | an error during the unbundling. This output capturing part will likely be |
|
458 | 458 | reworked and this ability will probably go away in the process. |
|
459 | 459 | """ |
|
460 | 460 | if op is None: |
|
461 | 461 | if transactiongetter is None: |
|
462 | 462 | transactiongetter = _notransaction |
|
463 | 463 | op = bundleoperation(repo, transactiongetter, source=source) |
|
464 | 464 | # todo: |
|
465 | 465 | # - replace this with an init function soon. |
|
466 | 466 | # - exception catching |
|
467 | 467 | unbundler.params |
|
468 | 468 | if repo.ui.debugflag: |
|
469 | 469 | msg = [b'bundle2-input-bundle:'] |
|
470 | 470 | if unbundler.params: |
|
471 | 471 | msg.append(b' %i params' % len(unbundler.params)) |
|
472 | 472 | if op._gettransaction is None or op._gettransaction is _notransaction: |
|
473 | 473 | msg.append(b' no-transaction') |
|
474 | 474 | else: |
|
475 | 475 | msg.append(b' with-transaction') |
|
476 | 476 | msg.append(b'\n') |
|
477 | 477 | repo.ui.debug(b''.join(msg)) |
|
478 | 478 | |
|
479 | 479 | processparts(repo, op, unbundler) |
|
480 | 480 | |
|
481 | 481 | return op |
|
482 | 482 | |
|
483 | 483 | |
|
484 | 484 | def processparts(repo, op, unbundler): |
|
485 | 485 | with partiterator(repo, op, unbundler) as parts: |
|
486 | 486 | for part in parts: |
|
487 | 487 | _processpart(op, part) |
|
488 | 488 | |
|
489 | 489 | |
|
490 | 490 | def _processchangegroup(op, cg, tr, source, url, **kwargs): |
|
491 | 491 | ret = cg.apply(op.repo, tr, source, url, **kwargs) |
|
492 | 492 | op.records.add(b'changegroup', {b'return': ret,}) |
|
493 | 493 | return ret |
|
494 | 494 | |
|
495 | 495 | |
|
496 | 496 | def _gethandler(op, part): |
|
497 | 497 | status = b'unknown' # used by debug output |
|
498 | 498 | try: |
|
499 | 499 | handler = parthandlermapping.get(part.type) |
|
500 | 500 | if handler is None: |
|
501 | 501 | status = b'unsupported-type' |
|
502 | 502 | raise error.BundleUnknownFeatureError(parttype=part.type) |
|
503 | 503 | indebug(op.ui, b'found a handler for part %s' % part.type) |
|
504 | 504 | unknownparams = part.mandatorykeys - handler.params |
|
505 | 505 | if unknownparams: |
|
506 | 506 | unknownparams = list(unknownparams) |
|
507 | 507 | unknownparams.sort() |
|
508 | 508 | status = b'unsupported-params (%s)' % b', '.join(unknownparams) |
|
509 | 509 | raise error.BundleUnknownFeatureError( |
|
510 | 510 | parttype=part.type, params=unknownparams |
|
511 | 511 | ) |
|
512 | 512 | status = b'supported' |
|
513 | 513 | except error.BundleUnknownFeatureError as exc: |
|
514 | 514 | if part.mandatory: # mandatory parts |
|
515 | 515 | raise |
|
516 | 516 | indebug(op.ui, b'ignoring unsupported advisory part %s' % exc) |
|
517 | 517 | return # skip to part processing |
|
518 | 518 | finally: |
|
519 | 519 | if op.ui.debugflag: |
|
520 | 520 | msg = [b'bundle2-input-part: "%s"' % part.type] |
|
521 | 521 | if not part.mandatory: |
|
522 | 522 | msg.append(b' (advisory)') |
|
523 | 523 | nbmp = len(part.mandatorykeys) |
|
524 | 524 | nbap = len(part.params) - nbmp |
|
525 | 525 | if nbmp or nbap: |
|
526 | 526 | msg.append(b' (params:') |
|
527 | 527 | if nbmp: |
|
528 | 528 | msg.append(b' %i mandatory' % nbmp) |
|
529 | 529 | if nbap: |
|
530 | 530 | msg.append(b' %i advisory' % nbap) |
|
531 | 531 | msg.append(b')') |
|
532 | 532 | msg.append(b' %s\n' % status) |
|
533 | 533 | op.ui.debug(b''.join(msg)) |
|
534 | 534 | |
|
535 | 535 | return handler |
|
536 | 536 | |
|
537 | 537 | |
|
538 | 538 | def _processpart(op, part): |
|
539 | 539 | """process a single part from a bundle |
|
540 | 540 | |
|
541 | 541 | The part is guaranteed to have been fully consumed when the function exits |
|
542 | 542 | (even if an exception is raised).""" |
|
543 | 543 | handler = _gethandler(op, part) |
|
544 | 544 | if handler is None: |
|
545 | 545 | return |
|
546 | 546 | |
|
547 | 547 | # handler is called outside the above try block so that we don't |
|
548 | 548 | # risk catching KeyErrors from anything other than the |
|
549 | 549 | # parthandlermapping lookup (any KeyError raised by handler() |
|
550 | 550 | # itself represents a defect of a different variety). |
|
551 | 551 | output = None |
|
552 | 552 | if op.captureoutput and op.reply is not None: |
|
553 | 553 | op.ui.pushbuffer(error=True, subproc=True) |
|
554 | 554 | output = b'' |
|
555 | 555 | try: |
|
556 | 556 | handler(op, part) |
|
557 | 557 | finally: |
|
558 | 558 | if output is not None: |
|
559 | 559 | output = op.ui.popbuffer() |
|
560 | 560 | if output: |
|
561 | 561 | outpart = op.reply.newpart(b'output', data=output, mandatory=False) |
|
562 | 562 | outpart.addparam( |
|
563 | 563 | b'in-reply-to', pycompat.bytestr(part.id), mandatory=False |
|
564 | 564 | ) |
|
565 | 565 | |
|
566 | 566 | |
|
567 | 567 | def decodecaps(blob): |
|
568 | 568 | """decode a bundle2 caps bytes blob into a dictionary |
|
569 | 569 | |
|
570 | 570 | The blob is a list of capabilities (one per line).
|
571 | 571 | Capabilities may have values using a line of the form:: |
|
572 | 572 | |
|
573 | 573 | capability=value1,value2,value3 |
|
574 | 574 | |
|
575 | 575 | The values are always a list.""" |
|
576 | 576 | caps = {} |
|
577 | 577 | for line in blob.splitlines(): |
|
578 | 578 | if not line: |
|
579 | 579 | continue |
|
580 | 580 | if b'=' not in line: |
|
581 | 581 | key, vals = line, () |
|
582 | 582 | else: |
|
583 | 583 | key, vals = line.split(b'=', 1) |
|
584 | 584 | vals = vals.split(b',') |
|
585 | 585 | key = urlreq.unquote(key) |
|
586 | 586 | vals = [urlreq.unquote(v) for v in vals] |
|
587 | 587 | caps[key] = vals |
|
588 | 588 | return caps |
|
589 | 589 | |
|
590 | 590 | |
|
591 | 591 | def encodecaps(caps): |
|
592 | 592 | """encode a bundle2 caps dictionary into a bytes blob""" |
|
593 | 593 | chunks = [] |
|
594 | 594 | for ca in sorted(caps): |
|
595 | 595 | vals = caps[ca] |
|
596 | 596 | ca = urlreq.quote(ca) |
|
597 | 597 | vals = [urlreq.quote(v) for v in vals] |
|
598 | 598 | if vals: |
|
599 | 599 | ca = b"%s=%s" % (ca, b','.join(vals)) |
|
600 | 600 | chunks.append(ca) |
|
601 | 601 | return b'\n'.join(chunks) |
|
602 | 602 | |
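# Example: round-tripping a capabilities dictionary through
# encodecaps()/decodecaps(). Keys and values are URL-quoted entry by
# entry, so reserved bytes survive the trip:
#
#   >>> blob = encodecaps({b'HG20': [], b'changegroup': [b'01', b'02']})
#   >>> blob
#   b'HG20\nchangegroup=01,02'
#   >>> decodecaps(blob)
#   {b'HG20': [], b'changegroup': [b'01', b'02']}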
|
603 | 603 | |
|
604 | 604 | bundletypes = { |
|
605 | 605 | b"": (b"", b'UN'), # only when using unbundle on ssh and old http servers |
|
606 | 606 | # since the unification, ssh accepts a header but there
|
607 | 607 | # is no capability signaling it. |
|
608 | 608 | b"HG20": (), # special-cased below |
|
609 | 609 | b"HG10UN": (b"HG10UN", b'UN'), |
|
610 | 610 | b"HG10BZ": (b"HG10", b'BZ'), |
|
611 | 611 | b"HG10GZ": (b"HG10GZ", b'GZ'), |
|
612 | 612 | } |
|
613 | 613 | |
|
614 | 614 | # hgweb uses this list to communicate its preferred type |
|
615 | 615 | bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN'] |
|
616 | 616 | |
|
617 | 617 | |
|
618 | 618 | class bundle20(object): |
|
619 | 619 | """represent an outgoing bundle2 container |
|
620 | 620 | |
|
621 | 621 | Use the `addparam` method to add a stream level parameter, and `newpart` to
|
622 | 622 | populate it. Then call `getchunks` to retrieve all the binary chunks of |
|
623 | 623 | data that compose the bundle2 container.""" |
|
624 | 624 | |
|
625 | 625 | _magicstring = b'HG20' |
|
626 | 626 | |
|
627 | 627 | def __init__(self, ui, capabilities=()): |
|
628 | 628 | self.ui = ui |
|
629 | 629 | self._params = [] |
|
630 | 630 | self._parts = [] |
|
631 | 631 | self.capabilities = dict(capabilities) |
|
632 | 632 | self._compengine = util.compengines.forbundletype(b'UN') |
|
633 | 633 | self._compopts = None |
|
634 | 634 | # If compression is being handled by a consumer of the raw |
|
635 | 635 | # data (e.g. the wire protocol), unsetting this flag tells |
|
636 | 636 | # consumers that the bundle is best left uncompressed. |
|
637 | 637 | self.prefercompressed = True |
|
638 | 638 | |
|
639 | 639 | def setcompression(self, alg, compopts=None): |
|
640 | 640 | """setup core part compression to <alg>""" |
|
641 | 641 | if alg in (None, b'UN'): |
|
642 | 642 | return |
|
643 | 643 | assert not any(n.lower() == b'compression' for n, v in self._params) |
|
644 | 644 | self.addparam(b'Compression', alg) |
|
645 | 645 | self._compengine = util.compengines.forbundletype(alg) |
|
646 | 646 | self._compopts = compopts |
|
647 | 647 | |
|
648 | 648 | @property |
|
649 | 649 | def nbparts(self): |
|
650 | 650 | """total number of parts added to the bundler""" |
|
651 | 651 | return len(self._parts) |
|
652 | 652 | |
|
653 | 653 | # methods used to define the bundle2 content
|
654 | 654 | def addparam(self, name, value=None): |
|
655 | 655 | """add a stream level parameter""" |
|
656 | 656 | if not name: |
|
657 | 657 | raise error.ProgrammingError(b'empty parameter name') |
|
658 | 658 | if name[0:1] not in pycompat.bytestr( |
|
659 | 659 | string.ascii_letters # pytype: disable=wrong-arg-types |
|
660 | 660 | ): |
|
661 | 661 | raise error.ProgrammingError( |
|
662 | 662 | b'non letter first character: %s' % name |
|
663 | 663 | ) |
|
664 | 664 | self._params.append((name, value)) |
|
665 | 665 | |
|
666 | 666 | def addpart(self, part): |
|
667 | 667 | """add a new part to the bundle2 container |
|
668 | 668 | |
|
669 | 669 | Parts contain the actual applicative payload."""
|
670 | 670 | assert part.id is None |
|
671 | 671 | part.id = len(self._parts) # very cheap counter |
|
672 | 672 | self._parts.append(part) |
|
673 | 673 | |
|
674 | 674 | def newpart(self, typeid, *args, **kwargs): |
|
675 | 675 | """create a new part and add it to the container

676 | 676 | 

677 | 677 | The part is directly added to the container. For now, this means

678 | 678 | that any failure to properly initialize the part after calling

679 | 679 | ``newpart`` results in a failure of the whole bundling process.

680 | 680 | 

681 | 681 | You can still fall back to manually creating and adding the part if

682 | 682 | you need better control."""
|
683 | 683 | part = bundlepart(typeid, *args, **kwargs) |
|
684 | 684 | self.addpart(part) |
|
685 | 685 | return part |
|
686 | 686 | |
|
687 | 687 | # methods used to generate the bundle2 stream |
|
688 | 688 | def getchunks(self): |
|
689 | 689 | if self.ui.debugflag: |
|
690 | 690 | msg = [b'bundle2-output-bundle: "%s",' % self._magicstring] |
|
691 | 691 | if self._params: |
|
692 | 692 | msg.append(b' (%i params)' % len(self._params)) |
|
693 | 693 | msg.append(b' %i parts total\n' % len(self._parts)) |
|
694 | 694 | self.ui.debug(b''.join(msg)) |
|
695 | 695 | outdebug(self.ui, b'start emission of %s stream' % self._magicstring) |
|
696 | 696 | yield self._magicstring |
|
697 | 697 | param = self._paramchunk() |
|
698 | 698 | outdebug(self.ui, b'bundle parameter: %s' % param) |
|
699 | 699 | yield _pack(_fstreamparamsize, len(param)) |
|
700 | 700 | if param: |
|
701 | 701 | yield param |
|
702 | 702 | for chunk in self._compengine.compressstream( |
|
703 | 703 | self._getcorechunk(), self._compopts |
|
704 | 704 | ): |
|
705 | 705 | yield chunk |
|
706 | 706 | |
|
707 | 707 | def _paramchunk(self): |
|
708 | 708 | """return an encoded version of all stream parameters"""
|
709 | 709 | blocks = [] |
|
710 | 710 | for par, value in self._params: |
|
711 | 711 | par = urlreq.quote(par) |
|
712 | 712 | if value is not None: |
|
713 | 713 | value = urlreq.quote(value) |
|
714 | 714 | par = b'%s=%s' % (par, value) |
|
715 | 715 | blocks.append(par) |
|
716 | 716 | return b' '.join(blocks) |
|
717 | 717 | |
|
718 | 718 | def _getcorechunk(self): |
|
719 | 719 | """yield chunks for the core part of the bundle
|
720 | 720 | |
|
721 | 721 | (all but headers and parameters)""" |
|
722 | 722 | outdebug(self.ui, b'start of parts') |
|
723 | 723 | for part in self._parts: |
|
724 | 724 | outdebug(self.ui, b'bundle part: "%s"' % part.type) |
|
725 | 725 | for chunk in part.getchunks(ui=self.ui): |
|
726 | 726 | yield chunk |
|
727 | 727 | outdebug(self.ui, b'end of bundle') |
|
728 | 728 | yield _pack(_fpartheadersize, 0) |
|
729 | 729 | |
|
730 | 730 | def salvageoutput(self): |
|
731 | 731 | """return a list with a copy of all output parts in the bundle |
|
732 | 732 | |
|
733 | 733 | This is meant to be used during error handling to make sure we preserve |
|
734 | 734 | server output""" |
|
735 | 735 | salvaged = [] |
|
736 | 736 | for part in self._parts: |
|
737 | 737 | if part.type.startswith(b'output'): |
|
738 | 738 | salvaged.append(part.copy()) |
|
739 | 739 | return salvaged |
|
740 | 740 | |
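# Example (illustrative; assumes a ``ui`` object is at hand): assembling
# a minimal bundle2 container with one advisory part.
#
#   bundler = bundle20(ui)
#   bundler.setcompression(b'GZ')
#   bundler.newpart(b'output', data=b'hello', mandatory=False)
#   raw = b''.join(bundler.getchunks())
#   assert raw.startswith(b'HG20')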
|
741 | 741 | |
|
742 | 742 | class unpackermixin(object): |
|
743 | 743 | """A mixin to extract bytes and struct data from a stream""" |
|
744 | 744 | |
|
745 | 745 | def __init__(self, fp): |
|
746 | 746 | self._fp = fp |
|
747 | 747 | |
|
748 | 748 | def _unpack(self, format): |
|
749 | 749 | """unpack this struct format from the stream |
|
750 | 750 | |
|
751 | 751 | This method is meant for internal usage by the bundle2 protocol only. |
|
752 | 752 | It directly manipulates the low level stream, including bundle2 level

753 | 753 | instructions.
|
754 | 754 | |
|
755 | 755 | Do not use it to implement higher-level logic or methods.""" |
|
756 | 756 | data = self._readexact(struct.calcsize(format)) |
|
757 | 757 | return _unpack(format, data) |
|
758 | 758 | |
|
759 | 759 | def _readexact(self, size): |
|
760 | 760 | """read exactly <size> bytes from the stream |
|
761 | 761 | |
|
762 | 762 | This method is meant for internal usage by the bundle2 protocol only. |
|
763 | 763 | It directly manipulates the low level stream, including bundle2 level

764 | 764 | instructions.
|
765 | 765 | |
|
766 | 766 | Do not use it to implement higher-level logic or methods.""" |
|
767 | 767 | return changegroup.readexactly(self._fp, size) |
|
768 | 768 | |
|
769 | 769 | |
|
770 | 770 | def getunbundler(ui, fp, magicstring=None): |
|
771 | 771 | """return a valid unbundler object for a given magicstring""" |
|
772 | 772 | if magicstring is None: |
|
773 | 773 | magicstring = changegroup.readexactly(fp, 4) |
|
774 | 774 | magic, version = magicstring[0:2], magicstring[2:4] |
|
775 | 775 | if magic != b'HG': |
|
776 | 776 | ui.debug( |
|
777 | 777 | b"error: invalid magic: %r (version %r), should be 'HG'\n" |
|
778 | 778 | % (magic, version) |
|
779 | 779 | ) |
|
780 | 780 | raise error.Abort(_(b'not a Mercurial bundle')) |
|
781 | 781 | unbundlerclass = formatmap.get(version) |
|
782 | 782 | if unbundlerclass is None: |
|
783 | 783 | raise error.Abort(_(b'unknown bundle version %s') % version) |
|
784 | 784 | unbundler = unbundlerclass(ui, fp) |
|
785 | 785 | indebug(ui, b'start processing of %s stream' % magicstring) |
|
786 | 786 | return unbundler |
|
787 | 787 | |
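# Example (illustrative; ``ui`` and the file name are assumptions):
# reading a bundle2 file back and listing its parts.
#
#   with open(b'changes.hg', 'rb') as fp:
#       unbundler = getunbundler(ui, fp)
#       for part in unbundler.iterparts():
#           ui.write(b'part: %s\n' % part.type)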
|
788 | 788 | |
|
789 | 789 | class unbundle20(unpackermixin): |
|
790 | 790 | """interpret a bundle2 stream |
|
791 | 791 | |
|
792 | 792 | This class is fed with a binary stream and yields parts through its |
|
793 | 793 | `iterparts` method."""
|
794 | 794 | |
|
795 | 795 | _magicstring = b'HG20' |
|
796 | 796 | |
|
797 | 797 | def __init__(self, ui, fp): |
|
798 | 798 | """initialize the unbundler for the stream in ``fp``"""
|
799 | 799 | self.ui = ui |
|
800 | 800 | self._compengine = util.compengines.forbundletype(b'UN') |
|
801 | 801 | self._compressed = None |
|
802 | 802 | super(unbundle20, self).__init__(fp) |
|
803 | 803 | |
|
804 | 804 | @util.propertycache |
|
805 | 805 | def params(self): |
|
806 | 806 | """dictionary of stream level parameters""" |
|
807 | 807 | indebug(self.ui, b'reading bundle2 stream parameters') |
|
808 | 808 | params = {} |
|
809 | 809 | paramssize = self._unpack(_fstreamparamsize)[0] |
|
810 | 810 | if paramssize < 0: |
|
811 | 811 | raise error.BundleValueError( |
|
812 | 812 | b'negative bundle param size: %i' % paramssize |
|
813 | 813 | ) |
|
814 | 814 | if paramssize: |
|
815 | 815 | params = self._readexact(paramssize) |
|
816 | 816 | params = self._processallparams(params) |
|
817 | 817 | return params |
|
818 | 818 | |
|
819 | 819 | def _processallparams(self, paramsblock): |
|
820 | 820 | """process a blob of stream level parameters, returning them as a dict"""
|
821 | 821 | params = util.sortdict() |
|
822 | 822 | for p in paramsblock.split(b' '): |
|
823 | 823 | p = p.split(b'=', 1) |
|
824 | 824 | p = [urlreq.unquote(i) for i in p] |
|
825 | 825 | if len(p) < 2: |
|
826 | 826 | p.append(None) |
|
827 | 827 | self._processparam(*p) |
|
828 | 828 | params[p[0]] = p[1] |
|
829 | 829 | return params |
|
830 | 830 | |
|
831 | 831 | def _processparam(self, name, value): |
|
832 | 832 | """process a parameter, applying its effect if needed |
|
833 | 833 | |
|
834 | 834 | Parameter starting with a lower case letter are advisory and will be |
|
835 | 835 | ignored when unknown. Those starting with an upper case letter are |
|
836 | 836 | mandatory and will this function will raise a KeyError when unknown. |
|
837 | 837 | |
|
838 | 838 | Note: no option are currently supported. Any input will be either |
|
839 | 839 | ignored or failing. |
|
840 | 840 | """ |
|
841 | 841 | if not name: |
|
842 | 842 | raise ValueError('empty parameter name') |
|
843 | 843 | if name[0:1] not in pycompat.bytestr( |
|
844 | 844 | string.ascii_letters # pytype: disable=wrong-arg-types |
|
845 | 845 | ): |
|
846 | 846 | raise ValueError('non letter first character: %s' % name) |
|
847 | 847 | try: |
|
848 | 848 | handler = b2streamparamsmap[name.lower()] |
|
849 | 849 | except KeyError: |
|
850 | 850 | if name[0:1].islower(): |
|
851 | 851 | indebug(self.ui, b"ignoring unknown parameter %s" % name) |
|
852 | 852 | else: |
|
853 | 853 | raise error.BundleUnknownFeatureError(params=(name,)) |
|
854 | 854 | else: |
|
855 | 855 | handler(self, name, value) |
|
856 | 856 | |
|
857 | 857 | def _forwardchunks(self): |
|
858 | 858 | """utility to transfer a bundle2 as binary |
|
859 | 859 | |
|
860 | 860 | This is made necessary by the fact that the 'getbundle' command over

861 | 861 | 'ssh' has no way to know when the reply ends, relying on the bundle

862 | 862 | being interpreted to find its end. This is terrible and we are sorry,

863 | 863 | but we needed to move forward to get general delta enabled.
|
864 | 864 | """ |
|
865 | 865 | yield self._magicstring |
|
866 | 866 | assert 'params' not in vars(self) |
|
867 | 867 | paramssize = self._unpack(_fstreamparamsize)[0] |
|
868 | 868 | if paramssize < 0: |
|
869 | 869 | raise error.BundleValueError( |
|
870 | 870 | b'negative bundle param size: %i' % paramssize |
|
871 | 871 | ) |
|
872 | 872 | if paramssize: |
|
873 | 873 | params = self._readexact(paramssize) |
|
874 | 874 | self._processallparams(params) |
|
875 | 875 | # The payload itself is decompressed below, so drop |
|
876 | 876 | # the compression parameter passed down to compensate. |
|
877 | 877 | outparams = [] |
|
878 | 878 | for p in params.split(b' '): |
|
879 | 879 | k, v = p.split(b'=', 1) |
|
880 | 880 | if k.lower() != b'compression': |
|
881 | 881 | outparams.append(p) |
|
882 | 882 | outparams = b' '.join(outparams) |
|
883 | 883 | yield _pack(_fstreamparamsize, len(outparams)) |
|
884 | 884 | yield outparams |
|
885 | 885 | else: |
|
886 | 886 | yield _pack(_fstreamparamsize, paramssize) |
|
887 | 887 | # From there, payload might need to be decompressed |
|
888 | 888 | self._fp = self._compengine.decompressorreader(self._fp) |
|
889 | 889 | emptycount = 0 |
|
890 | 890 | while emptycount < 2: |
|
891 | 891 | # so we can brainlessly loop |
|
892 | 892 | assert _fpartheadersize == _fpayloadsize |
|
893 | 893 | size = self._unpack(_fpartheadersize)[0] |
|
894 | 894 | yield _pack(_fpartheadersize, size) |
|
895 | 895 | if size: |
|
896 | 896 | emptycount = 0 |
|
897 | 897 | else: |
|
898 | 898 | emptycount += 1 |
|
899 | 899 | continue |
|
900 | 900 | if size == flaginterrupt: |
|
901 | 901 | continue |
|
902 | 902 | elif size < 0: |
|
903 | 903 | raise error.BundleValueError(b'negative chunk size: %i' % size)
|
904 | 904 | yield self._readexact(size) |
|
905 | 905 | |
|
906 | 906 | def iterparts(self, seekable=False): |
|
907 | 907 | """yield all parts contained in the stream""" |
|
908 | 908 | cls = seekableunbundlepart if seekable else unbundlepart |
|
909 | 909 | # make sure params have been loaded

910 | 910 | self.params

911 | 911 | # From there, payload needs to be decompressed
|
912 | 912 | self._fp = self._compengine.decompressorreader(self._fp) |
|
913 | 913 | indebug(self.ui, b'start extraction of bundle2 parts') |
|
914 | 914 | headerblock = self._readpartheader() |
|
915 | 915 | while headerblock is not None: |
|
916 | 916 | part = cls(self.ui, headerblock, self._fp) |
|
917 | 917 | yield part |
|
918 | 918 | # Ensure part is fully consumed so we can start reading the next |
|
919 | 919 | # part. |
|
920 | 920 | part.consume() |
|
921 | 921 | |
|
922 | 922 | headerblock = self._readpartheader() |
|
923 | 923 | indebug(self.ui, b'end of bundle2 stream') |
|
924 | 924 | |
|
925 | 925 | def _readpartheader(self): |
|
926 | 926 | """reads a part header size and return the bytes blob |
|
927 | 927 | |
|
928 | 928 | returns None if empty""" |
|
929 | 929 | headersize = self._unpack(_fpartheadersize)[0] |
|
930 | 930 | if headersize < 0: |
|
931 | 931 | raise error.BundleValueError( |
|
932 | 932 | b'negative part header size: %i' % headersize |
|
933 | 933 | ) |
|
934 | 934 | indebug(self.ui, b'part header size: %i' % headersize) |
|
935 | 935 | if headersize: |
|
936 | 936 | return self._readexact(headersize) |
|
937 | 937 | return None |
|
938 | 938 | |
|
939 | 939 | def compressed(self): |
|
940 | 940 | self.params # load params |
|
941 | 941 | return self._compressed |
|
942 | 942 | |
|
943 | 943 | def close(self): |
|
944 | 944 | """close underlying file""" |
|
945 | 945 | if util.safehasattr(self._fp, 'close'): |
|
946 | 946 | return self._fp.close() |
|
947 | 947 | |
|
948 | 948 | |
|
949 | 949 | formatmap = {b'20': unbundle20} |
|
950 | 950 | |
|
951 | 951 | b2streamparamsmap = {} |
|
952 | 952 | |
|
953 | 953 | |
|
954 | 954 | def b2streamparamhandler(name): |
|
955 | 955 | """register a handler for a stream level parameter""" |
|
956 | 956 | |
|
957 | 957 | def decorator(func): |
|
958 | 958 | assert name not in b2streamparamsmap
|
959 | 959 | b2streamparamsmap[name] = func |
|
960 | 960 | return func |
|
961 | 961 | |
|
962 | 962 | return decorator |
|
963 | 963 | |
|
964 | 964 | |
|
965 | 965 | @b2streamparamhandler(b'compression') |
|
966 | 966 | def processcompression(unbundler, param, value): |
|
967 | 967 | """read compression parameter and install payload decompression""" |
|
968 | 968 | if value not in util.compengines.supportedbundletypes: |
|
969 | 969 | raise error.BundleUnknownFeatureError(params=(param,), values=(value,)) |
|
970 | 970 | unbundler._compengine = util.compengines.forbundletype(value) |
|
971 | 971 | if value is not None: |
|
972 | 972 | unbundler._compressed = True |
|
973 | 973 | |
|
974 | 974 | |
|
975 | 975 | class bundlepart(object): |
|
976 | 976 | """A bundle2 part contains application level payload |
|
977 | 977 | |
|
978 | 978 | The part `type` is used to route the part to the application level |
|
979 | 979 | handler. |
|
980 | 980 | |
|
981 | 981 | The part payload is contained in ``part.data``. It could be raw bytes or a |
|
982 | 982 | generator of byte chunks. |
|
983 | 983 | |
|
984 | 984 | You can add parameters to the part using the ``addparam`` method. |
|
985 | 985 | Parameters can be either mandatory (default) or advisory. Remote side |
|
986 | 986 | should be able to safely ignore the advisory ones. |
|
987 | 987 | |
|
988 | 988 | Neither data nor parameters can be modified after generation has begun.
|
989 | 989 | """ |
|
990 | 990 | |
|
991 | 991 | def __init__( |
|
992 | 992 | self, |
|
993 | 993 | parttype, |
|
994 | 994 | mandatoryparams=(), |
|
995 | 995 | advisoryparams=(), |
|
996 | 996 | data=b'', |
|
997 | 997 | mandatory=True, |
|
998 | 998 | ): |
|
999 | 999 | validateparttype(parttype) |
|
1000 | 1000 | self.id = None |
|
1001 | 1001 | self.type = parttype |
|
1002 | 1002 | self._data = data |
|
1003 | 1003 | self._mandatoryparams = list(mandatoryparams) |
|
1004 | 1004 | self._advisoryparams = list(advisoryparams) |
|
1005 | 1005 | # checking for duplicated entries |
|
1006 | 1006 | self._seenparams = set() |
|
1007 | 1007 | for pname, __ in self._mandatoryparams + self._advisoryparams: |
|
1008 | 1008 | if pname in self._seenparams: |
|
1009 | 1009 | raise error.ProgrammingError(b'duplicated params: %s' % pname) |
|
1010 | 1010 | self._seenparams.add(pname) |
|
1011 | 1011 | # status of the part's generation: |
|
1012 | 1012 | # - None: not started, |
|
1013 | 1013 | # - False: currently generated, |
|
1014 | 1014 | # - True: generation done. |
|
1015 | 1015 | self._generated = None |
|
1016 | 1016 | self.mandatory = mandatory |
|
1017 | 1017 | |
|
1018 | 1018 | def __repr__(self): |
|
1019 | 1019 | cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) |
|
1020 | 1020 | return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % ( |
|
1021 | 1021 | cls, |
|
1022 | 1022 | id(self), |
|
1023 | 1023 | self.id, |
|
1024 | 1024 | self.type, |
|
1025 | 1025 | self.mandatory, |
|
1026 | 1026 | ) |
|
1027 | 1027 | |
|
1028 | 1028 | def copy(self): |
|
1029 | 1029 | """return a copy of the part |
|
1030 | 1030 | |
|
1031 | 1031 | The new part has the very same content but no part id assigned yet.
|
1032 | 1032 | Parts with generated data cannot be copied.""" |
|
1033 | 1033 | assert not util.safehasattr(self.data, 'next') |
|
1034 | 1034 | return self.__class__( |
|
1035 | 1035 | self.type, |
|
1036 | 1036 | self._mandatoryparams, |
|
1037 | 1037 | self._advisoryparams, |
|
1038 | 1038 | self._data, |
|
1039 | 1039 | self.mandatory, |
|
1040 | 1040 | ) |
|
1041 | 1041 | |
|
1042 | 1042 | # methods used to define the part content
|
1043 | 1043 | @property |
|
1044 | 1044 | def data(self): |
|
1045 | 1045 | return self._data |
|
1046 | 1046 | |
|
1047 | 1047 | @data.setter |
|
1048 | 1048 | def data(self, data): |
|
1049 | 1049 | if self._generated is not None: |
|
1050 | 1050 | raise error.ReadOnlyPartError(b'part is being generated') |
|
1051 | 1051 | self._data = data |
|
1052 | 1052 | |
|
1053 | 1053 | @property |
|
1054 | 1054 | def mandatoryparams(self): |
|
1055 | 1055 | # make it an immutable tuple to force people through ``addparam`` |
|
1056 | 1056 | return tuple(self._mandatoryparams) |
|
1057 | 1057 | |
|
1058 | 1058 | @property |
|
1059 | 1059 | def advisoryparams(self): |
|
1060 | 1060 | # make it an immutable tuple to force people through ``addparam`` |
|
1061 | 1061 | return tuple(self._advisoryparams) |
|
1062 | 1062 | |
|
1063 | 1063 | def addparam(self, name, value=b'', mandatory=True): |
|
1064 | 1064 | """add a parameter to the part |
|
1065 | 1065 | |
|
1066 | 1066 | If 'mandatory' is set to True, the remote handler must claim support |
|
1067 | 1067 | for this parameter or the unbundling will be aborted. |
|
1068 | 1068 | |
|
1069 | 1069 | The 'name' and 'value' cannot exceed 255 bytes each. |
|
1070 | 1070 | """ |
|
1071 | 1071 | if self._generated is not None: |
|
1072 | 1072 | raise error.ReadOnlyPartError(b'part is being generated') |
|
1073 | 1073 | if name in self._seenparams: |
|
1074 | 1074 | raise ValueError(b'duplicated params: %s' % name) |
|
1075 | 1075 | self._seenparams.add(name) |
|
1076 | 1076 | params = self._advisoryparams |
|
1077 | 1077 | if mandatory: |
|
1078 | 1078 | params = self._mandatoryparams |
|
1079 | 1079 | params.append((name, value)) |
|
1080 | 1080 | |
|
1081 | 1081 | # methods used to generate the bundle2 stream
|
1082 | 1082 | def getchunks(self, ui): |
|
1083 | 1083 | if self._generated is not None: |
|
1084 | 1084 | raise error.ProgrammingError(b'part can only be consumed once') |
|
1085 | 1085 | self._generated = False |
|
1086 | 1086 | |
|
1087 | 1087 | if ui.debugflag: |
|
1088 | 1088 | msg = [b'bundle2-output-part: "%s"' % self.type] |
|
1089 | 1089 | if not self.mandatory: |
|
1090 | 1090 | msg.append(b' (advisory)') |
|
1091 | 1091 | nbmp = len(self.mandatoryparams) |
|
1092 | 1092 | nbap = len(self.advisoryparams) |
|
1093 | 1093 | if nbmp or nbap: |
|
1094 | 1094 | msg.append(b' (params:') |
|
1095 | 1095 | if nbmp: |
|
1096 | 1096 | msg.append(b' %i mandatory' % nbmp) |
|
1097 | 1097 | if nbap: |
|
1098 | 1098 | msg.append(b' %i advisory' % nbap)
|
1099 | 1099 | msg.append(b')') |
|
1100 | 1100 | if not self.data: |
|
1101 | 1101 | msg.append(b' empty payload') |
|
1102 | 1102 | elif util.safehasattr(self.data, 'next') or util.safehasattr( |
|
1103 | 1103 | self.data, b'__next__' |
|
1104 | 1104 | ): |
|
1105 | 1105 | msg.append(b' streamed payload') |
|
1106 | 1106 | else: |
|
1107 | 1107 | msg.append(b' %i bytes payload' % len(self.data)) |
|
1108 | 1108 | msg.append(b'\n') |
|
1109 | 1109 | ui.debug(b''.join(msg)) |
|
1110 | 1110 | |
|
1111 | 1111 | #### header |
|
1112 | 1112 | if self.mandatory: |
|
1113 | 1113 | parttype = self.type.upper() |
|
1114 | 1114 | else: |
|
1115 | 1115 | parttype = self.type.lower() |
|
1116 | 1116 | outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype)) |
|
1117 | 1117 | ## parttype |
|
1118 | 1118 | header = [ |
|
1119 | 1119 | _pack(_fparttypesize, len(parttype)), |
|
1120 | 1120 | parttype, |
|
1121 | 1121 | _pack(_fpartid, self.id), |
|
1122 | 1122 | ] |
|
1123 | 1123 | ## parameters |
|
1124 | 1124 | # count |
|
1125 | 1125 | manpar = self.mandatoryparams |
|
1126 | 1126 | advpar = self.advisoryparams |
|
1127 | 1127 | header.append(_pack(_fpartparamcount, len(manpar), len(advpar))) |
|
1128 | 1128 | # size |
|
1129 | 1129 | parsizes = [] |
|
1130 | 1130 | for key, value in manpar: |
|
1131 | 1131 | parsizes.append(len(key)) |
|
1132 | 1132 | parsizes.append(len(value)) |
|
1133 | 1133 | for key, value in advpar: |
|
1134 | 1134 | parsizes.append(len(key)) |
|
1135 | 1135 | parsizes.append(len(value)) |
|
1136 | 1136 | paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes) |
|
1137 | 1137 | header.append(paramsizes) |
|
1138 | 1138 | # key, value |
|
1139 | 1139 | for key, value in manpar: |
|
1140 | 1140 | header.append(key) |
|
1141 | 1141 | header.append(value) |
|
1142 | 1142 | for key, value in advpar: |
|
1143 | 1143 | header.append(key) |
|
1144 | 1144 | header.append(value) |
|
1145 | 1145 | ## finalize header |
|
1146 | 1146 | try: |
|
1147 | 1147 | headerchunk = b''.join(header) |
|
1148 | 1148 | except TypeError: |
|
1149 | 1149 | raise TypeError( |
|
1150 | 1150 | 'Found a non-bytes trying to ' |
|
1151 | 1151 | 'build bundle part header: %r' % header |
|
1152 | 1152 | ) |
|
1153 | 1153 | outdebug(ui, b'header chunk size: %i' % len(headerchunk)) |
|
1154 | 1154 | yield _pack(_fpartheadersize, len(headerchunk)) |
|
1155 | 1155 | yield headerchunk |
|
1156 | 1156 | ## payload |
|
1157 | 1157 | try: |
|
1158 | 1158 | for chunk in self._payloadchunks(): |
|
1159 | 1159 | outdebug(ui, b'payload chunk size: %i' % len(chunk)) |
|
1160 | 1160 | yield _pack(_fpayloadsize, len(chunk)) |
|
1161 | 1161 | yield chunk |
|
1162 | 1162 | except GeneratorExit: |
|
1163 | 1163 | # GeneratorExit means that nobody is listening for our |
|
1164 | 1164 | # results anyway, so just bail quickly rather than trying |
|
1165 | 1165 | # to produce an error part. |
|
1166 | 1166 | ui.debug(b'bundle2-generatorexit\n') |
|
1167 | 1167 | raise |
|
1168 | 1168 | except BaseException as exc: |
|
1169 | 1169 | bexc = stringutil.forcebytestr(exc) |
|
1170 | 1170 | # backup exception data for later |
|
1171 | 1171 | ui.debug( |
|
1172 | 1172 | b'bundle2-input-stream-interrupt: encoding exception %s' % bexc |
|
1173 | 1173 | ) |
|
1174 | 1174 | tb = sys.exc_info()[2] |
|
1175 | 1175 | msg = b'unexpected error: %s' % bexc |
|
1176 | 1176 | interpart = bundlepart( |
|
1177 | 1177 | b'error:abort', [(b'message', msg)], mandatory=False |
|
1178 | 1178 | ) |
|
1179 | 1179 | interpart.id = 0 |
|
1180 | 1180 | yield _pack(_fpayloadsize, -1) |
|
1181 | 1181 | for chunk in interpart.getchunks(ui=ui): |
|
1182 | 1182 | yield chunk |
|
1183 | 1183 | outdebug(ui, b'closing payload chunk') |
|
1184 | 1184 | # abort current part payload |
|
1185 | 1185 | yield _pack(_fpayloadsize, 0) |
|
1186 | 1186 | pycompat.raisewithtb(exc, tb) |
|
1187 | 1187 | # end of payload |
|
1188 | 1188 | outdebug(ui, b'closing payload chunk') |
|
1189 | 1189 | yield _pack(_fpayloadsize, 0) |
|
1190 | 1190 | self._generated = True |
|
1191 | 1191 | |
|
1192 | 1192 | def _payloadchunks(self): |
|
1193 | 1193 | """yield chunks of the part payload
|
1194 | 1194 | |
|
1195 | 1195 | Exists to handle the different methods to provide data to a part.""" |
|
1196 | 1196 | # we only support fixed size data now. |
|
1197 | 1197 | # This will be improved in the future. |
|
1198 | 1198 | if util.safehasattr(self.data, 'next') or util.safehasattr( |
|
1199 | 1199 | self.data, b'__next__' |
|
1200 | 1200 | ): |
|
1201 | 1201 | buff = util.chunkbuffer(self.data) |
|
1202 | 1202 | chunk = buff.read(preferedchunksize) |
|
1203 | 1203 | while chunk: |
|
1204 | 1204 | yield chunk |
|
1205 | 1205 | chunk = buff.read(preferedchunksize) |
|
1206 | 1206 | elif len(self.data): |
|
1207 | 1207 | yield self.data |
|
1208 | 1208 | |
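# Example (illustrative; b'output' is reused as a convenient part type):
# a part payload may be a generator of byte chunks rather than a bytes
# object, in which case getchunks() frames whatever the buffer yields.
#
#   def _chunks():
#       yield b'abc'
#       yield b'def'
#
#   part = bundlepart(b'output', data=_chunks(), mandatory=False)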
|
1209 | 1209 | |
|
1210 | 1210 | flaginterrupt = -1 |
|
1211 | 1211 | |
|
1212 | 1212 | |
|
1213 | 1213 | class interrupthandler(unpackermixin): |
|
1214 | 1214 | """read one part and process it with restricted capability |
|
1215 | 1215 | |
|
1216 | 1216 | This allows transmitting an exception raised on the producer side

1217 | 1217 | during part iteration while the consumer is reading a part.

1218 | 1218 | 

1219 | 1219 | Parts processed in this manner only have access to a ui object."""
|
1220 | 1220 | |
|
1221 | 1221 | def __init__(self, ui, fp): |
|
1222 | 1222 | super(interrupthandler, self).__init__(fp) |
|
1223 | 1223 | self.ui = ui |
|
1224 | 1224 | |
|
1225 | 1225 | def _readpartheader(self): |
|
1226 | 1226 | """reads a part header size and return the bytes blob |
|
1227 | 1227 | |
|
1228 | 1228 | returns None if empty""" |
|
1229 | 1229 | headersize = self._unpack(_fpartheadersize)[0] |
|
1230 | 1230 | if headersize < 0: |
|
1231 | 1231 | raise error.BundleValueError( |
|
1232 | 1232 | b'negative part header size: %i' % headersize |
|
1233 | 1233 | ) |
|
1234 | 1234 | indebug(self.ui, b'part header size: %i\n' % headersize) |
|
1235 | 1235 | if headersize: |
|
1236 | 1236 | return self._readexact(headersize) |
|
1237 | 1237 | return None |
|
1238 | 1238 | |
|
1239 | 1239 | def __call__(self): |
|
1240 | 1240 | |
|
1241 | 1241 | self.ui.debug( |
|
1242 | 1242 | b'bundle2-input-stream-interrupt: opening out of band context\n' |
|
1243 | 1243 | ) |
|
1244 | 1244 | indebug(self.ui, b'bundle2 stream interruption, looking for a part.') |
|
1245 | 1245 | headerblock = self._readpartheader() |
|
1246 | 1246 | if headerblock is None: |
|
1247 | 1247 | indebug(self.ui, b'no part found during interruption.') |
|
1248 | 1248 | return |
|
1249 | 1249 | part = unbundlepart(self.ui, headerblock, self._fp) |
|
1250 | 1250 | op = interruptoperation(self.ui) |
|
1251 | 1251 | hardabort = False |
|
1252 | 1252 | try: |
|
1253 | 1253 | _processpart(op, part) |
|
1254 | 1254 | except (SystemExit, KeyboardInterrupt): |
|
1255 | 1255 | hardabort = True |
|
1256 | 1256 | raise |
|
1257 | 1257 | finally: |
|
1258 | 1258 | if not hardabort: |
|
1259 | 1259 | part.consume() |
|
1260 | 1260 | self.ui.debug( |
|
1261 | 1261 | b'bundle2-input-stream-interrupt: closing out of band context\n' |
|
1262 | 1262 | ) |
|
1263 | 1263 | |
|
1264 | 1264 | |
|
1265 | 1265 | class interruptoperation(object): |
|
1266 | 1266 | """A limited operation to be used by part handlers during interruption

1267 | 1267 | 

1268 | 1268 | It only has access to a ui object.
|
1269 | 1269 | """ |
|
1270 | 1270 | |
|
1271 | 1271 | def __init__(self, ui): |
|
1272 | 1272 | self.ui = ui |
|
1273 | 1273 | self.reply = None |
|
1274 | 1274 | self.captureoutput = False |
|
1275 | 1275 | |
|
1276 | 1276 | @property |
|
1277 | 1277 | def repo(self): |
|
1278 | 1278 | raise error.ProgrammingError(b'no repo access from stream interruption') |
|
1279 | 1279 | |
|
1280 | 1280 | def gettransaction(self): |
|
1281 | 1281 | raise TransactionUnavailable(b'no repo access from stream interruption') |
|
1282 | 1282 | |
|
1283 | 1283 | |
|
1284 | 1284 | def decodepayloadchunks(ui, fh): |
|
1285 | 1285 | """Reads bundle2 part payload data into chunks. |
|
1286 | 1286 | |
|
1287 | 1287 | Part payload data consists of framed chunks. This function takes |
|
1288 | 1288 | a file handle and emits those chunks. |
|
1289 | 1289 | """ |
|
1290 | 1290 | dolog = ui.configbool(b'devel', b'bundle2.debug') |
|
1291 | 1291 | debug = ui.debug |
|
1292 | 1292 | |
|
1293 | 1293 | headerstruct = struct.Struct(_fpayloadsize) |
|
1294 | 1294 | headersize = headerstruct.size |
|
1295 | 1295 | unpack = headerstruct.unpack |
|
1296 | 1296 | |
|
1297 | 1297 | readexactly = changegroup.readexactly |
|
1298 | 1298 | read = fh.read |
|
1299 | 1299 | |
|
1300 | 1300 | chunksize = unpack(readexactly(fh, headersize))[0] |
|
1301 | 1301 | indebug(ui, b'payload chunk size: %i' % chunksize) |
|
1302 | 1302 | |
|
1303 | 1303 | # changegroup.readexactly() is inlined below for performance. |
|
1304 | 1304 | while chunksize: |
|
1305 | 1305 | if chunksize >= 0: |
|
1306 | 1306 | s = read(chunksize) |
|
1307 | 1307 | if len(s) < chunksize: |
|
1308 | 1308 | raise error.Abort( |
|
1309 | 1309 | _( |
|
1310 | 1310 | b'stream ended unexpectedly ' |
|
1311 | 1311 | b'(got %d bytes, expected %d)'
|
1312 | 1312 | ) |
|
1313 | 1313 | % (len(s), chunksize) |
|
1314 | 1314 | ) |
|
1315 | 1315 | |
|
1316 | 1316 | yield s |
|
1317 | 1317 | elif chunksize == flaginterrupt: |
|
1318 | 1318 | # Interrupt "signal" detected. The regular stream is interrupted |
|
1319 | 1319 | # and a bundle2 part follows. Consume it. |
|
1320 | 1320 | interrupthandler(ui, fh)() |
|
1321 | 1321 | else: |
|
1322 | 1322 | raise error.BundleValueError( |
|
1323 | 1323 | b'negative payload chunk size: %s' % chunksize |
|
1324 | 1324 | ) |
|
1325 | 1325 | |
|
1326 | 1326 | s = read(headersize) |
|
1327 | 1327 | if len(s) < headersize: |
|
1328 | 1328 | raise error.Abort( |
|
1329 | 1329 | _(b'stream ended unexpectedly (got %d bytes, expected %d)') |
|
1330 | 1330 | % (len(s), headersize)
|
1331 | 1331 | ) |
|
1332 | 1332 | |
|
1333 | 1333 | chunksize = unpack(s)[0] |
|
1334 | 1334 | |
|
1335 | 1335 | # indebug() inlined for performance. |
|
1336 | 1336 | if dolog: |
|
1337 | 1337 | debug(b'bundle2-input: payload chunk size: %i\n' % chunksize) |
|
1338 | 1338 | |
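# Example (illustrative; ``ui`` is an assumption): the framing decoded
# above, built by hand -- two data frames followed by the closing
# zero-size frame.
#
#   import io, struct
#   frames = (struct.pack(b'>i', 3) + b'abc'
#             + struct.pack(b'>i', 3) + b'def'
#             + struct.pack(b'>i', 0))
#   assert b''.join(decodepayloadchunks(ui, io.BytesIO(frames))) == b'abcdef'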
|
1339 | 1339 | |
|
1340 | 1340 | class unbundlepart(unpackermixin): |
|
1341 | 1341 | """a bundle part read from a bundle""" |
|
1342 | 1342 | |
|
1343 | 1343 | def __init__(self, ui, header, fp): |
|
1344 | 1344 | super(unbundlepart, self).__init__(fp) |
|
1345 | 1345 | self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr( |
|
1346 | 1346 | fp, b'tell' |
|
1347 | 1347 | ) |
|
1348 | 1348 | self.ui = ui |
|
1349 | 1349 | # unbundle state attr |
|
1350 | 1350 | self._headerdata = header |
|
1351 | 1351 | self._headeroffset = 0 |
|
1352 | 1352 | self._initialized = False |
|
1353 | 1353 | self.consumed = False |
|
1354 | 1354 | # part data |
|
1355 | 1355 | self.id = None |
|
1356 | 1356 | self.type = None |
|
1357 | 1357 | self.mandatoryparams = None |
|
1358 | 1358 | self.advisoryparams = None |
|
1359 | 1359 | self.params = None |
|
1360 | 1360 | self.mandatorykeys = () |
|
1361 | 1361 | self._readheader() |
|
1362 | 1362 | self._mandatory = None |
|
1363 | 1363 | self._pos = 0 |
|
1364 | 1364 | |
|
1365 | 1365 | def _fromheader(self, size): |
|
1366 | 1366 | """return the next <size> byte from the header""" |
|
1367 | 1367 | offset = self._headeroffset |
|
1368 | 1368 | data = self._headerdata[offset : (offset + size)] |
|
1369 | 1369 | self._headeroffset = offset + size |
|
1370 | 1370 | return data |
|
1371 | 1371 | |
|
1372 | 1372 | def _unpackheader(self, format): |
|
1373 | 1373 | """read given format from header |
|
1374 | 1374 | |
|
1375 | 1375 | This automatically computes the size of the format to read."""
|
1376 | 1376 | data = self._fromheader(struct.calcsize(format)) |
|
1377 | 1377 | return _unpack(format, data) |
|
1378 | 1378 | |
|
1379 | 1379 | def _initparams(self, mandatoryparams, advisoryparams): |
|
1380 | 1380 | """internal function to set up all logic-related parameters"""
|
1381 | 1381 | # make it read only to prevent people touching it by mistake. |
|
1382 | 1382 | self.mandatoryparams = tuple(mandatoryparams) |
|
1383 | 1383 | self.advisoryparams = tuple(advisoryparams) |
|
1384 | 1384 | # user friendly UI |
|
1385 | 1385 | self.params = util.sortdict(self.mandatoryparams) |
|
1386 | 1386 | self.params.update(self.advisoryparams) |
|
1387 | 1387 | self.mandatorykeys = frozenset(p[0] for p in mandatoryparams) |
|
1388 | 1388 | |
|
1389 | 1389 | def _readheader(self): |
|
1390 | 1390 | """read the header and setup the object""" |
|
1391 | 1391 | typesize = self._unpackheader(_fparttypesize)[0] |
|
1392 | 1392 | self.type = self._fromheader(typesize) |
|
1393 | 1393 | indebug(self.ui, b'part type: "%s"' % self.type) |
|
1394 | 1394 | self.id = self._unpackheader(_fpartid)[0] |
|
1395 | 1395 | indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id)) |
|
1396 | 1396 | # extract mandatory bit from type |
|
1397 | 1397 | self.mandatory = self.type != self.type.lower() |
|
1398 | 1398 | self.type = self.type.lower() |
|
1399 | 1399 | ## reading parameters |
|
1400 | 1400 | # param count |
|
1401 | 1401 | mancount, advcount = self._unpackheader(_fpartparamcount) |
|
1402 | 1402 | indebug(self.ui, b'part parameters: %i' % (mancount + advcount)) |
|
1403 | 1403 | # param size |
|
1404 | 1404 | fparamsizes = _makefpartparamsizes(mancount + advcount) |
|
1405 | 1405 | paramsizes = self._unpackheader(fparamsizes) |
|
1406 | 1406 | # make it a list of pairs again
|
1407 | 1407 | paramsizes = list(zip(paramsizes[::2], paramsizes[1::2])) |
|
1408 | 1408 | # split mandatory from advisory |
|
1409 | 1409 | mansizes = paramsizes[:mancount] |
|
1410 | 1410 | advsizes = paramsizes[mancount:] |
|
1411 | 1411 | # retrieve param value |
|
1412 | 1412 | manparams = [] |
|
1413 | 1413 | for key, value in mansizes: |
|
1414 | 1414 | manparams.append((self._fromheader(key), self._fromheader(value))) |
|
1415 | 1415 | advparams = [] |
|
1416 | 1416 | for key, value in advsizes: |
|
1417 | 1417 | advparams.append((self._fromheader(key), self._fromheader(value))) |
|
1418 | 1418 | self._initparams(manparams, advparams) |
|
1419 | 1419 | ## part payload |
|
1420 | 1420 | self._payloadstream = util.chunkbuffer(self._payloadchunks()) |
|
1421 | 1421 | # we read the data, tell it |
|
1422 | 1422 | self._initialized = True |
|
1423 | 1423 | |
|
1424 | 1424 | def _payloadchunks(self): |
|
1425 | 1425 | """Generator of decoded chunks in the payload.""" |
|
1426 | 1426 | return decodepayloadchunks(self.ui, self._fp) |
|
1427 | 1427 | |
|
1428 | 1428 | def consume(self): |
|
1429 | 1429 | """Read the part payload until completion. |
|
1430 | 1430 | |
|
1431 | 1431 | By consuming the part data, the underlying stream read offset will |
|
1432 | 1432 | be advanced to the next part (or end of stream). |
|
1433 | 1433 | """ |
|
1434 | 1434 | if self.consumed: |
|
1435 | 1435 | return |
|
1436 | 1436 | |
|
1437 | 1437 | chunk = self.read(32768) |
|
1438 | 1438 | while chunk: |
|
1439 | 1439 | self._pos += len(chunk) |
|
1440 | 1440 | chunk = self.read(32768) |
|
1441 | 1441 | |
|
1442 | 1442 | def read(self, size=None): |
|
1443 | 1443 | """read payload data""" |
|
1444 | 1444 | if not self._initialized: |
|
1445 | 1445 | self._readheader() |
|
1446 | 1446 | if size is None: |
|
1447 | 1447 | data = self._payloadstream.read() |
|
1448 | 1448 | else: |
|
1449 | 1449 | data = self._payloadstream.read(size) |
|
1450 | 1450 | self._pos += len(data) |
|
1451 | 1451 | if size is None or len(data) < size: |
|
1452 | 1452 | if not self.consumed and self._pos: |
|
1453 | 1453 | self.ui.debug( |
|
1454 | 1454 | b'bundle2-input-part: total payload size %i\n' % self._pos |
|
1455 | 1455 | ) |
|
1456 | 1456 | self.consumed = True |
|
1457 | 1457 | return data |
|
1458 | 1458 | |
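# Usage note (illustrative): parts read like file objects. Reading past
# the end marks the part consumed; consume() drains the remainder so the
# stream is positioned at the start of the next part.
#
#   header = part.read(4)  # first payload bytes
#   part.consume()         # skip whatever is left of this part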
|
1459 | 1459 | |
|
1460 | 1460 | class seekableunbundlepart(unbundlepart): |
|
1461 | 1461 | """A bundle2 part in a bundle that is seekable. |
|
1462 | 1462 | |
|
1463 | 1463 | Regular ``unbundlepart`` instances can only be read once. This class |
|
1464 | 1464 | extends ``unbundlepart`` to enable bi-directional seeking within the |
|
1465 | 1465 | part. |
|
1466 | 1466 | |
|
1467 | 1467 | Bundle2 part data consists of framed chunks. Offsets when seeking |
|
1468 | 1468 | refer to the decoded data, not the offsets in the underlying bundle2 |
|
1469 | 1469 | stream. |
|
1470 | 1470 | |
|
1471 | 1471 | To facilitate quickly seeking within the decoded data, instances of this |
|
1472 | 1472 | class maintain a mapping between offsets in the underlying stream and |
|
1473 | 1473 | the decoded payload. This mapping will consume memory in proportion |
|
1474 | 1474 | to the number of chunks within the payload (which almost certainly |
|
1475 | 1475 | increases in proportion with the size of the part). |
|
1476 | 1476 | """ |
|
1477 | 1477 | |
|
1478 | 1478 | def __init__(self, ui, header, fp): |
|
1479 | 1479 | # (payload, file) offsets for chunk starts. |
|
1480 | 1480 | self._chunkindex = [] |
|
1481 | 1481 | |
|
1482 | 1482 | super(seekableunbundlepart, self).__init__(ui, header, fp) |
|
1483 | 1483 | |
|
1484 | 1484 | def _payloadchunks(self, chunknum=0): |
|
1485 | 1485 | '''seek to specified chunk and start yielding data''' |
|
1486 | 1486 | if len(self._chunkindex) == 0: |
|
1487 | 1487 | assert chunknum == 0, b'Must start with chunk 0' |
|
1488 | 1488 | self._chunkindex.append((0, self._tellfp())) |
|
1489 | 1489 | else: |
|
1490 | 1490 | assert chunknum < len(self._chunkindex), ( |
|
1491 | 1491 | b'Unknown chunk %d' % chunknum |
|
1492 | 1492 | ) |
|
1493 | 1493 | self._seekfp(self._chunkindex[chunknum][1]) |
|
1494 | 1494 | |
|
1495 | 1495 | pos = self._chunkindex[chunknum][0] |
|
1496 | 1496 | |
|
1497 | 1497 | for chunk in decodepayloadchunks(self.ui, self._fp): |
|
1498 | 1498 | chunknum += 1 |
|
1499 | 1499 | pos += len(chunk) |
|
1500 | 1500 | if chunknum == len(self._chunkindex): |
|
1501 | 1501 | self._chunkindex.append((pos, self._tellfp())) |
|
1502 | 1502 | |
|
1503 | 1503 | yield chunk |
|
1504 | 1504 | |
|
1505 | 1505 | def _findchunk(self, pos): |
|
1506 | 1506 | '''for a given payload position, return a chunk number and offset''' |
|
1507 | 1507 | for chunk, (ppos, fpos) in enumerate(self._chunkindex): |
|
1508 | 1508 | if ppos == pos: |
|
1509 | 1509 | return chunk, 0 |
|
1510 | 1510 | elif ppos > pos: |
|
1511 | 1511 | return chunk - 1, pos - self._chunkindex[chunk - 1][0] |
|
1512 | 1512 | raise ValueError(b'Unknown chunk') |
|
1513 | 1513 | |
|
1514 | 1514 | def tell(self): |
|
1515 | 1515 | return self._pos |
|
1516 | 1516 | |
|
1517 | 1517 | def seek(self, offset, whence=os.SEEK_SET): |
|
1518 | 1518 | if whence == os.SEEK_SET: |
|
1519 | 1519 | newpos = offset |
|
1520 | 1520 | elif whence == os.SEEK_CUR: |
|
1521 | 1521 | newpos = self._pos + offset |
|
1522 | 1522 | elif whence == os.SEEK_END: |
|
1523 | 1523 | if not self.consumed: |
|
1524 | 1524 | # Can't use self.consume() here because it advances self._pos. |
|
1525 | 1525 | chunk = self.read(32768) |
|
1526 | 1526 | while chunk: |
|
1527 | 1527 | chunk = self.read(32768) |
|
1528 | 1528 | newpos = self._chunkindex[-1][0] - offset |
|
1529 | 1529 | else: |
|
1530 | 1530 | raise ValueError(b'Unknown whence value: %r' % (whence,)) |
|
1531 | 1531 | |
|
1532 | 1532 | if newpos > self._chunkindex[-1][0] and not self.consumed: |
|
1533 | 1533 | # Can't use self.consume() here because it advances self._pos. |
|
1534 | 1534 | chunk = self.read(32768) |
|
1535 | 1535 | while chunk: |
|
1536 | 1536 | chunk = self.read(32768)
|
1537 | 1537 | |
|
1538 | 1538 | if not 0 <= newpos <= self._chunkindex[-1][0]: |
|
1539 | 1539 | raise ValueError(b'Offset out of range') |
|
1540 | 1540 | |
|
1541 | 1541 | if self._pos != newpos: |
|
1542 | 1542 | chunk, internaloffset = self._findchunk(newpos) |
|
1543 | 1543 | self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk)) |
|
1544 | 1544 | adjust = self.read(internaloffset) |
|
1545 | 1545 | if len(adjust) != internaloffset: |
|
1546 | 1546 | raise error.Abort(_(b'Seek failed\n')) |
|
1547 | 1547 | self._pos = newpos |
|
1548 | 1548 | |
|
1549 | 1549 | def _seekfp(self, offset, whence=0): |
|
1550 | 1550 | """move the underlying file pointer |
|
1551 | 1551 | |
|
1552 | 1552 | This method is meant for internal usage by the bundle2 protocol only. |
|
1553 | 1553 | It directly manipulates the low level stream, including bundle2 level

1554 | 1554 | instructions.
|
1555 | 1555 | |
|
1556 | 1556 | Do not use it to implement higher-level logic or methods.""" |
|
1557 | 1557 | if self._seekable: |
|
1558 | 1558 | return self._fp.seek(offset, whence) |
|
1559 | 1559 | else: |
|
1560 | 1560 | raise NotImplementedError(_(b'File pointer is not seekable')) |
|
1561 | 1561 | |
|
1562 | 1562 | def _tellfp(self): |
|
1563 | 1563 | """return the file offset, or None if file is not seekable |
|
1564 | 1564 | |
|
1565 | 1565 | This method is meant for internal usage by the bundle2 protocol only. |
|
1566 | 1566 | It directly manipulates the low level stream, including bundle2 level

1567 | 1567 | instructions.
|
1568 | 1568 | |
|
1569 | 1569 | Do not use it to implement higher-level logic or methods.""" |
|
1570 | 1570 | if self._seekable: |
|
1571 | 1571 | try: |
|
1572 | 1572 | return self._fp.tell() |
|
1573 | 1573 | except IOError as e: |
|
1574 | 1574 | if e.errno == errno.ESPIPE: |
|
1575 | 1575 | self._seekable = False |
|
1576 | 1576 | else: |
|
1577 | 1577 | raise |
|
1578 | 1578 | return None |
|
1579 | 1579 | |
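# Example (illustrative): seekable parts allow measure-then-rewind access
# that a plain ``unbundlepart`` cannot offer.
#
#   for part in unbundler.iterparts(seekable=True):
#       part.seek(0, os.SEEK_END)
#       size = part.tell()
#       part.seek(0)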
|
1580 | 1580 | |
|
1581 | 1581 | # These are only the static capabilities. |
|
1582 | 1582 | # Check the 'getrepocaps' function for the rest. |
|
1583 | 1583 | capabilities = { |
|
1584 | 1584 | b'HG20': (), |
|
1585 | 1585 | b'bookmarks': (), |
|
1586 | 1586 | b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'), |
|
1587 | 1587 | b'listkeys': (), |
|
1588 | 1588 | b'pushkey': (), |
|
1589 | 1589 | b'digests': tuple(sorted(util.DIGESTS.keys())), |
|
1590 | 1590 | b'remote-changegroup': (b'http', b'https'), |
|
1591 | 1591 | b'hgtagsfnodes': (), |
|
1592 | 1592 | b'rev-branch-cache': (), |
|
1593 | 1593 | b'phases': (b'heads',), |
|
1594 | 1594 | b'stream': (b'v2',), |
|
1595 | 1595 | } |
|
1596 | 1596 | |
|
1597 | 1597 | |
|
1598 | 1598 | def getrepocaps(repo, allowpushback=False, role=None): |
|
1599 | 1599 | """return the bundle2 capabilities for a given repo |
|
1600 | 1600 | |
|
1601 | 1601 | Exists to allow extensions (like evolution) to mutate the capabilities. |
|
1602 | 1602 | |
|
1603 | 1603 | The returned value is used for servers advertising their capabilities as |
|
1604 | 1604 | well as clients advertising their capabilities to servers as part of |
|
1605 | 1605 | bundle2 requests. The ``role`` argument specifies which is which. |
|
1606 | 1606 | """ |
|
1607 | 1607 | if role not in (b'client', b'server'): |
|
1608 | 1608 | raise error.ProgrammingError(b'role argument must be client or server') |
|
1609 | 1609 | |
|
1610 | 1610 | caps = capabilities.copy() |
|
1611 | 1611 | caps[b'changegroup'] = tuple( |
|
1612 | 1612 | sorted(changegroup.supportedincomingversions(repo)) |
|
1613 | 1613 | ) |
|
1614 | 1614 | if obsolete.isenabled(repo, obsolete.exchangeopt): |
|
1615 | 1615 | supportedformat = tuple(b'V%i' % v for v in obsolete.formats) |
|
1616 | 1616 | caps[b'obsmarkers'] = supportedformat |
|
1617 | 1617 | if allowpushback: |
|
1618 | 1618 | caps[b'pushback'] = () |
|
1619 | 1619 | cpmode = repo.ui.config(b'server', b'concurrent-push-mode') |
|
1620 | 1620 | if cpmode == b'check-related': |
|
1621 | 1621 | caps[b'checkheads'] = (b'related',) |
|
1622 | 1622 | if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'): |
|
1623 | 1623 | caps.pop(b'phases') |
|
1624 | 1624 | |
|
1625 | 1625 | # Don't advertise stream clone support in server mode if not configured. |
|
1626 | 1626 | if role == b'server': |
|
1627 | 1627 | streamsupported = repo.ui.configbool( |
|
1628 | 1628 | b'server', b'uncompressed', untrusted=True |
|
1629 | 1629 | ) |
|
1630 | 1630 | featuresupported = repo.ui.configbool(b'server', b'bundle2.stream') |
|
1631 | 1631 | |
|
1632 | 1632 | if not streamsupported or not featuresupported: |
|
1633 | 1633 | caps.pop(b'stream') |
|
1634 | 1634 | # Else always advertise support on client, because payload support |
|
1635 | 1635 | # should always be advertised. |
|
1636 | 1636 | |
|
1637 | 1637 | return caps |
|
1638 | 1638 | |
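# Example (illustrative): how these capabilities typically travel. A
# server quotes the encoded blob into its 'bundle2' capability string,
# and clients recover it with bundle2caps() below.
#
#   caps = getrepocaps(repo, role=b'server')
#   blob = urlreq.quote(encodecaps(caps))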
|
1639 | 1639 | |
|
1640 | 1640 | def bundle2caps(remote): |
|
1641 | 1641 | """return the bundle capabilities of a peer as dict""" |
|
1642 | 1642 | raw = remote.capable(b'bundle2') |
|
1643 | 1643 | if not raw and raw != b'': |
|
1644 | 1644 | return {} |
|
1645 | 1645 | capsblob = urlreq.unquote(remote.capable(b'bundle2')) |
|
1646 | 1646 | return decodecaps(capsblob) |
|
1647 | 1647 | |
|
1648 | 1648 | |
|
1649 | 1649 | def obsmarkersversion(caps): |
|
1650 | 1650 | """extract the list of supported obsmarkers versions from a bundle2caps dict |
|
1651 | 1651 | """ |
|
1652 | 1652 | obscaps = caps.get(b'obsmarkers', ()) |
|
1653 | 1653 | return [int(c[1:]) for c in obscaps if c.startswith(b'V')] |
|
1654 | 1654 | |
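# Example:
#
#   >>> obsmarkersversion({b'obsmarkers': (b'V0', b'V1')})
#   [0, 1]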
|
1655 | 1655 | |
|
1656 | 1656 | def writenewbundle( |
|
1657 | 1657 | ui, |
|
1658 | 1658 | repo, |
|
1659 | 1659 | source, |
|
1660 | 1660 | filename, |
|
1661 | 1661 | bundletype, |
|
1662 | 1662 | outgoing, |
|
1663 | 1663 | opts, |
|
1664 | 1664 | vfs=None, |
|
1665 | 1665 | compression=None, |
|
1666 | 1666 | compopts=None, |
|
1667 | 1667 | ): |
|
1668 | 1668 | if bundletype.startswith(b'HG10'): |
|
1669 | 1669 | cg = changegroup.makechangegroup(repo, outgoing, b'01', source) |
|
1670 | 1670 | return writebundle( |
|
1671 | 1671 | ui, |
|
1672 | 1672 | cg, |
|
1673 | 1673 | filename, |
|
1674 | 1674 | bundletype, |
|
1675 | 1675 | vfs=vfs, |
|
1676 | 1676 | compression=compression, |
|
1677 | 1677 | compopts=compopts, |
|
1678 | 1678 | ) |
|
1679 | 1679 | elif not bundletype.startswith(b'HG20'): |
|
1680 | 1680 | raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype) |
|
1681 | 1681 | |
|
1682 | 1682 | caps = {} |
|
1683 | 1683 | if b'obsolescence' in opts: |
|
1684 | 1684 | caps[b'obsmarkers'] = (b'V1',) |
|
1685 | 1685 | bundle = bundle20(ui, caps) |
|
1686 | 1686 | bundle.setcompression(compression, compopts) |
|
1687 | 1687 | _addpartsfromopts(ui, repo, bundle, source, outgoing, opts) |
|
1688 | 1688 | chunkiter = bundle.getchunks() |
|
1689 | 1689 | |
|
1690 | 1690 | return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs) |
|
1691 | 1691 | |
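# Example (illustrative; the names are assumptions): writing the
# outgoing changesets as a gzip-compressed bundle2 file.
#
#   writenewbundle(ui, repo, b'bundle', b'out.hg', b'HG20', outgoing,
#                  {b'changegroup': True}, compression=b'GZ')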
|
1692 | 1692 | |
|
1693 | 1693 | def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts): |
|
1694 | 1694 | # We should eventually reconcile this logic with the one behind |
|
1695 | 1695 | # 'exchange.getbundle2partsgenerator'. |
|
1696 | 1696 | # |
|
1697 | 1697 | # The type of input from 'getbundle' and 'writenewbundle' are a bit |
|
1698 | 1698 | # different right now. So we keep them separated for now for the sake of |
|
1699 | 1699 | # simplicity. |
|
1700 | 1700 | |
|
1701 | 1701 | # we might not always want a changegroup in such bundle, for example in |
|
1702 | 1702 | # stream bundles |
|
1703 | 1703 | if opts.get(b'changegroup', True): |
|
1704 | 1704 | cgversion = opts.get(b'cg.version') |
|
1705 | 1705 | if cgversion is None: |
|
1706 | 1706 | cgversion = changegroup.safeversion(repo) |
|
1707 | 1707 | cg = changegroup.makechangegroup(repo, outgoing, cgversion, source) |
|
1708 | 1708 | part = bundler.newpart(b'changegroup', data=cg.getchunks()) |
|
1709 | 1709 | part.addparam(b'version', cg.version) |
|
1710 | 1710 | if b'clcount' in cg.extras: |
|
1711 | 1711 | part.addparam( |
|
1712 | 1712 | b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False |
|
1713 | 1713 | ) |
|
1714 | 1714 | if opts.get(b'phases') and repo.revs( |
|
1715 | 1715 | b'%ln and secret()', outgoing.ancestorsof |
|
1716 | 1716 | ): |
|
1717 | 1717 | part.addparam( |
|
1718 | 1718 | b'targetphase', b'%d' % phases.secret, mandatory=False |
|
1719 | 1719 | ) |
|
1720 | 1720 | if b'exp-sidedata-flag' in repo.requirements: |
|
1721 | 1721 | part.addparam(b'exp-sidedata', b'1') |
|
1722 | 1722 | |
|
1723 | 1723 | if opts.get(b'streamv2', False): |
|
1724 | 1724 | addpartbundlestream2(bundler, repo, stream=True) |
|
1725 | 1725 | |
|
1726 | 1726 | if opts.get(b'tagsfnodescache', True): |
|
1727 | 1727 | addparttagsfnodescache(repo, bundler, outgoing) |
|
1728 | 1728 | |
|
1729 | 1729 | if opts.get(b'revbranchcache', True): |
|
1730 | 1730 | addpartrevbranchcache(repo, bundler, outgoing) |
|
1731 | 1731 | |
|
1732 | 1732 | if opts.get(b'obsolescence', False): |
|
1733 | 1733 | obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing) |
|
1734 | 1734 | buildobsmarkerspart(bundler, obsmarkers) |
|
1735 | 1735 | |
|
1736 | 1736 | if opts.get(b'phases', False): |
|
1737 | 1737 | headsbyphase = phases.subsetphaseheads(repo, outgoing.missing) |
|
1738 | 1738 | phasedata = phases.binaryencode(headsbyphase) |
|
1739 | 1739 | bundler.newpart(b'phase-heads', data=phasedata) |
|
1740 | 1740 | |
|
1741 | 1741 | |
|
1742 | 1742 | def addparttagsfnodescache(repo, bundler, outgoing): |
|
1743 | 1743 | # we include the tags fnode cache for the bundle changeset |
|
1744 | 1744 | # (as an optional part)
|
1745 | 1745 | cache = tags.hgtagsfnodescache(repo.unfiltered()) |
|
1746 | 1746 | chunks = [] |
|
1747 | 1747 | |
|
1748 | 1748 | # .hgtags fnodes are only relevant for head changesets. While we could |
|
1749 | 1749 | # transfer values for all known nodes, there will likely be little to |
|
1750 | 1750 | # no benefit. |
|
1751 | 1751 | # |
|
1752 | 1752 | # We don't bother using a generator to produce output data because |
|
1753 | 1753 | # a) we only have 40 bytes per head and even esoteric numbers of heads |
|
1754 | 1754 | # consume little memory (1M heads is 40MB) b) we don't want to send the |
|
1755 | 1755 | # part if we don't have entries and knowing if we have entries requires |
|
1756 | 1756 | # cache lookups. |
|
1757 | 1757 | for node in outgoing.ancestorsof: |
|
1758 | 1758 | # Don't compute missing, as this may slow down serving. |
|
1759 | 1759 | fnode = cache.getfnode(node, computemissing=False) |
|
1760 | 1760 | if fnode is not None: |
|
1761 | 1761 | chunks.extend([node, fnode]) |
|
1762 | 1762 | |
|
1763 | 1763 | if chunks: |
|
1764 | 1764 | bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks)) |
|
1765 | 1765 | |
|
1766 | 1766 | |
|
1767 | 1767 | def addpartrevbranchcache(repo, bundler, outgoing): |
|
1768 | 1768 | # we include the rev branch cache for the bundle changeset |
|
1769 | 1769 | # (as an optional part)
|
1770 | 1770 | cache = repo.revbranchcache() |
|
1771 | 1771 | cl = repo.unfiltered().changelog |
|
1772 | 1772 | branchesdata = collections.defaultdict(lambda: (set(), set())) |
|
1773 | 1773 | for node in outgoing.missing: |
|
1774 | 1774 | branch, close = cache.branchinfo(cl.rev(node)) |
|
1775 | 1775 | branchesdata[branch][close].add(node) |
|
1776 | 1776 | |
|
1777 | 1777 | def generate(): |
|
1778 | 1778 | for branch, (nodes, closed) in sorted(branchesdata.items()): |
|
1779 | 1779 | utf8branch = encoding.fromlocal(branch) |
|
1780 | 1780 | yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed)) |
|
1781 | 1781 | yield utf8branch |
|
1782 | 1782 | for n in sorted(nodes): |
|
1783 | 1783 | yield n |
|
1784 | 1784 | for n in sorted(closed): |
|
1785 | 1785 | yield n |
|
1786 | 1786 | |
|
1787 | 1787 | bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False) |
|
1788 | 1788 | |
|
1789 | 1789 | |
|
1790 | 1790 | def _formatrequirementsspec(requirements): |
|
1791 | 1791 | requirements = [req for req in requirements if req != b"shared"] |
|
1792 | 1792 | return urlreq.quote(b','.join(sorted(requirements))) |
|
1793 | 1793 | |
|
1794 | 1794 | |
|
1795 | 1795 | def _formatrequirementsparams(requirements): |
|
1796 | 1796 | requirements = _formatrequirementsspec(requirements) |
|
1797 | 1797 | params = b"%s%s" % (urlreq.quote(b"requirements="), requirements) |
|
1798 | 1798 | return params |
|
1799 | 1799 | |
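# Example (illustrative): b'shared' is filtered out, the rest sorted and
# URL-quoted (the comma becomes %2C):
#
#   _formatrequirementsspec([b'store', b'shared', b'revlogv1'])
#   # -> b'revlogv1%2Cstore'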
|
1800 | 1800 | |
|
1801 | 1801 | def addpartbundlestream2(bundler, repo, **kwargs): |
|
1802 | 1802 | if not kwargs.get('stream', False): |
|
1803 | 1803 | return |
|
1804 | 1804 | |
|
1805 | 1805 | if not streamclone.allowservergeneration(repo): |
|
1806 | 1806 | raise error.Abort( |
|
1807 | 1807 | _( |
|
1808 | 1808 | b'stream data requested but server does not allow ' |
|
1809 | 1809 | b'this feature' |
|
1810 | 1810 | ), |
|
1811 | 1811 | hint=_( |
|
1812 | 1812 | b'well-behaved clients should not be ' |
|
1813 | 1813 | b'requesting stream data from servers not ' |
|
1814 | 1814 | b'advertising it; the client may be buggy' |
|
1815 | 1815 | ), |
|
1816 | 1816 | ) |
|
1817 | 1817 | |
|
1818 | 1818 | # Stream clones don't compress well. And compression undermines a |
|
1819 | 1819 | # goal of stream clones, which is to be fast. Communicate the desire |
|
1820 | 1820 | # to avoid compression to consumers of the bundle. |
|
1821 | 1821 | bundler.prefercompressed = False |
|
1822 | 1822 | |
|
1823 | 1823 | # get the includes and excludes |
|
1824 | 1824 | includepats = kwargs.get('includepats') |
|
1825 | 1825 | excludepats = kwargs.get('excludepats') |
|
1826 | 1826 | |
|
1827 | 1827 | narrowstream = repo.ui.configbool( |
|
1828 | 1828 | b'experimental', b'server.stream-narrow-clones' |
|
1829 | 1829 | ) |
|
1830 | 1830 | |
|
1831 | 1831 | if (includepats or excludepats) and not narrowstream: |
|
1832 | 1832 | raise error.Abort(_(b'server does not support narrow stream clones')) |
|
1833 | 1833 | |
|
1834 | 1834 | includeobsmarkers = False |
|
1835 | 1835 | if repo.obsstore: |
|
1836 | 1836 | remoteversions = obsmarkersversion(bundler.capabilities) |
|
1837 | 1837 | if not remoteversions: |
|
1838 | 1838 | raise error.Abort( |
|
1839 | 1839 | _( |
|
1840 | 1840 | b'server has obsolescence markers, but client ' |
|
1841 | 1841 | b'cannot receive them via stream clone' |
|
1842 | 1842 | ) |
|
1843 | 1843 | ) |
|
1844 | 1844 | elif repo.obsstore._version in remoteversions: |
|
1845 | 1845 | includeobsmarkers = True |
|
1846 | 1846 | |
|
1847 | 1847 | filecount, bytecount, it = streamclone.generatev2( |
|
1848 | 1848 | repo, includepats, excludepats, includeobsmarkers |
|
1849 | 1849 | ) |
|
1850 | 1850 | requirements = _formatrequirementsspec(repo.requirements) |
|
1851 | 1851 | part = bundler.newpart(b'stream2', data=it) |
|
1852 | 1852 | part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True) |
|
1853 | 1853 | part.addparam(b'filecount', b'%d' % filecount, mandatory=True) |
|
1854 | 1854 | part.addparam(b'requirements', requirements, mandatory=True) |
|
1855 | 1855 | |
|
1856 | 1856 | |
|
1857 | 1857 | def buildobsmarkerspart(bundler, markers): |
|
1858 | 1858 | """add an obsmarker part to the bundler with <markers> |
|
1859 | 1859 | |
|
1860 | 1860 | No part is created if markers is empty. |
|
1861 | 1861 | Raises ValueError if the bundler doesn't support any known obsmarker format. |
|
1862 | 1862 | """ |
|
1863 | 1863 | if not markers: |
|
1864 | 1864 | return None |
|
1865 | 1865 | |
|
1866 | 1866 | remoteversions = obsmarkersversion(bundler.capabilities) |
|
1867 | 1867 | version = obsolete.commonversion(remoteversions) |
|
1868 | 1868 | if version is None: |
|
1869 | 1869 | raise ValueError(b'bundler does not support common obsmarker format') |
|
1870 | 1870 | stream = obsolete.encodemarkers(markers, True, version=version) |
|
1871 | 1871 | return bundler.newpart(b'obsmarkers', data=stream) |
|
1872 | 1872 | |
|
1873 | 1873 | |
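A sketch of the version negotiation performed by obsolete.commonversion above; the preference-for-highest rule here is an assumption for illustration:

def commonversion(localversions, remoteversions):
    # pick the newest marker format both sides understand, if any
    for v in sorted(localversions, reverse=True):
        if v in remoteversions:
            return v
    return None

assert commonversion({0, 1}, {0, 1}) == 1
assert commonversion({0, 1}, set()) is None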
|
1874 | 1874 | def writebundle( |
|
1875 | 1875 | ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None |
|
1876 | 1876 | ): |
|
1877 | 1877 | """Write a bundle file and return its filename. |
|
1878 | 1878 | |
|
1879 | 1879 | Existing files will not be overwritten. |
|
1880 | 1880 | If no filename is specified, a temporary file is created. |
|
1881 | 1881 | bz2 compression can be turned off. |
|
1882 | 1882 | The bundle file will be deleted in case of errors. |
|
1883 | 1883 | """ |
|
1884 | 1884 | |
|
1885 | 1885 | if bundletype == b"HG20": |
|
1886 | 1886 | bundle = bundle20(ui) |
|
1887 | 1887 | bundle.setcompression(compression, compopts) |
|
1888 | 1888 | part = bundle.newpart(b'changegroup', data=cg.getchunks()) |
|
1889 | 1889 | part.addparam(b'version', cg.version) |
|
1890 | 1890 | if b'clcount' in cg.extras: |
|
1891 | 1891 | part.addparam( |
|
1892 | 1892 | b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False |
|
1893 | 1893 | ) |
|
1894 | 1894 | chunkiter = bundle.getchunks() |
|
1895 | 1895 | else: |
|
1896 | 1896 | # compression argument is only for the bundle2 case |
|
1897 | 1897 | assert compression is None |
|
1898 | 1898 | if cg.version != b'01': |
|
1899 | 1899 | raise error.Abort( |
|
1900 | 1900 | 	                _(b'old bundle types only support v1 changegroups')
|
1901 | 1901 | ) |
|
1902 | 1902 | header, comp = bundletypes[bundletype] |
|
1903 | 1903 | if comp not in util.compengines.supportedbundletypes: |
|
1904 | 1904 | raise error.Abort(_(b'unknown stream compression type: %s') % comp) |
|
1905 | 1905 | compengine = util.compengines.forbundletype(comp) |
|
1906 | 1906 | |
|
1907 | 1907 | def chunkiter(): |
|
1908 | 1908 | yield header |
|
1909 | 1909 | for chunk in compengine.compressstream(cg.getchunks(), compopts): |
|
1910 | 1910 | yield chunk |
|
1911 | 1911 | |
|
1912 | 1912 | chunkiter = chunkiter() |
|
1913 | 1913 | |
|
1914 | 1914 | # parse the changegroup data, otherwise we will block |
|
1915 | 1915 | # in case of sshrepo because we don't know the end of the stream |
|
1916 | 1916 | return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs) |
|
1917 | 1917 | |
|
1918 | 1918 | |
|
1919 | 1919 | def combinechangegroupresults(op): |
|
1920 | 1920 | """logic to combine 0 or more addchangegroup results into one""" |
|
1921 | 1921 | results = [r.get(b'return', 0) for r in op.records[b'changegroup']] |
|
1922 | 1922 | changedheads = 0 |
|
1923 | 1923 | result = 1 |
|
1924 | 1924 | for ret in results: |
|
1925 | 1925 | # If any changegroup result is 0, return 0 |
|
1926 | 1926 | if ret == 0: |
|
1927 | 1927 | result = 0 |
|
1928 | 1928 | break |
|
1929 | 1929 | if ret < -1: |
|
1930 | 1930 | changedheads += ret + 1 |
|
1931 | 1931 | elif ret > 1: |
|
1932 | 1932 | changedheads += ret - 1 |
|
1933 | 1933 | if changedheads > 0: |
|
1934 | 1934 | result = 1 + changedheads |
|
1935 | 1935 | elif changedheads < 0: |
|
1936 | 1936 | result = -1 + changedheads |
|
1937 | 1937 | return result |
|
1938 | 1938 | |
|
1939 | 1939 | |
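Worked example of the return-code arithmetic above, with hypothetical per-changegroup results; the encoding (1 + added heads, -1 - removed heads) matches cg1unpacker.apply later in this diff:

def combine(results):
    changedheads = 0
    result = 1
    for ret in results:
        if ret == 0:       # any zero result stops the scan
            result = 0
            break
        if ret < -1:       # ret encodes -1 - removed heads
            changedheads += ret + 1
        elif ret > 1:      # ret encodes 1 + added heads
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result

assert combine([2, 3]) == 4    # +1 and +2 heads -> net +3 -> encoded as 4
assert combine([2, -3]) == -2  # +1 and -2 heads -> net -1 -> encoded as -2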
|
1940 | 1940 | @parthandler( |
|
1941 | 1941 | b'changegroup', |
|
1942 | 1942 | ( |
|
1943 | 1943 | b'version', |
|
1944 | 1944 | b'nbchanges', |
|
1945 | 1945 | b'exp-sidedata', |
|
1946 | 1946 | b'treemanifest', |
|
1947 | 1947 | b'targetphase', |
|
1948 | 1948 | ), |
|
1949 | 1949 | ) |
|
1950 | 1950 | def handlechangegroup(op, inpart): |
|
1951 | 1951 | """apply a changegroup part on the repo |
|
1952 | 1952 | |
|
1953 | 1953 | 	    This is a very early implementation that will see massive rework before

1954 | 1954 | 	    being inflicted on any end-user.
|
1955 | 1955 | """ |
|
1956 | 1956 | from . import localrepo |
|
1957 | 1957 | |
|
1958 | 1958 | tr = op.gettransaction() |
|
1959 | 1959 | unpackerversion = inpart.params.get(b'version', b'01') |
|
1960 | 1960 | # We should raise an appropriate exception here |
|
1961 | 1961 | cg = changegroup.getunbundler(unpackerversion, inpart, None) |
|
1962 | 1962 | # the source and url passed here are overwritten by the one contained in |
|
1963 | 1963 | # the transaction.hookargs argument. So 'bundle2' is a placeholder |
|
1964 | 1964 | nbchangesets = None |
|
1965 | 1965 | if b'nbchanges' in inpart.params: |
|
1966 | 1966 | nbchangesets = int(inpart.params.get(b'nbchanges')) |
|
1967 | if (

1968 | b'treemanifest' in inpart.params

1969 | and requirements.TREEMANIFEST_REQUIREMENT not in op.repo.requirements

1970 | ):

1967 | if b'treemanifest' in inpart.params and not scmutil.istreemanifest(op.repo):
|
1971 | 1968 | if len(op.repo.changelog) != 0: |
|
1972 | 1969 | raise error.Abort( |
|
1973 | 1970 | _( |
|
1974 | 1971 | b"bundle contains tree manifests, but local repo is " |
|
1975 | 1972 | b"non-empty and does not use tree manifests" |
|
1976 | 1973 | ) |
|
1977 | 1974 | ) |
|
1978 | 1975 | op.repo.requirements.add(requirements.TREEMANIFEST_REQUIREMENT) |
|
1979 | 1976 | op.repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
1980 | 1977 | op.repo.ui, op.repo.requirements, op.repo.features |
|
1981 | 1978 | ) |
|
1982 | 1979 | scmutil.writereporequirements(op.repo) |
|
1983 | 1980 | |
|
1984 | 1981 | bundlesidedata = bool(b'exp-sidedata' in inpart.params) |
|
1985 | 1982 | reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements) |
|
1986 | 1983 | if reposidedata and not bundlesidedata: |
|
1987 | 1984 | 	        msg = b"repository is using sidedata but the bundle source does not"
|
1988 | 1985 | hint = b'this is currently unsupported' |
|
1989 | 1986 | raise error.Abort(msg, hint=hint) |
|
1990 | 1987 | |
|
1991 | 1988 | extrakwargs = {} |
|
1992 | 1989 | targetphase = inpart.params.get(b'targetphase') |
|
1993 | 1990 | if targetphase is not None: |
|
1994 | 1991 | extrakwargs['targetphase'] = int(targetphase) |
|
1995 | 1992 | ret = _processchangegroup( |
|
1996 | 1993 | op, |
|
1997 | 1994 | cg, |
|
1998 | 1995 | tr, |
|
1999 | 1996 | b'bundle2', |
|
2000 | 1997 | b'bundle2', |
|
2001 | 1998 | expectedtotal=nbchangesets, |
|
2002 | 1999 | **extrakwargs |
|
2003 | 2000 | ) |
|
2004 | 2001 | if op.reply is not None: |
|
2005 | 2002 | # This is definitely not the final form of this |
|
2006 | 2003 | 	        # return. But one needs to start somewhere.
|
2007 | 2004 | part = op.reply.newpart(b'reply:changegroup', mandatory=False) |
|
2008 | 2005 | part.addparam( |
|
2009 | 2006 | b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False |
|
2010 | 2007 | ) |
|
2011 | 2008 | part.addparam(b'return', b'%i' % ret, mandatory=False) |
|
2012 | 2009 | assert not inpart.read() |
|
2013 | 2010 | |
|
2014 | 2011 | |
|
2015 | 2012 | _remotechangegroupparams = tuple( |
|
2016 | 2013 | [b'url', b'size', b'digests'] |
|
2017 | 2014 | + [b'digest:%s' % k for k in util.DIGESTS.keys()] |
|
2018 | 2015 | ) |
|
2019 | 2016 | |
|
2020 | 2017 | |
|
2021 | 2018 | @parthandler(b'remote-changegroup', _remotechangegroupparams) |
|
2022 | 2019 | def handleremotechangegroup(op, inpart): |
|
2023 | 2020 | """apply a bundle10 on the repo, given an url and validation information |
|
2024 | 2021 | |
|
2025 | 2022 | 	    All the information about the remote bundle to import is given as
|
2026 | 2023 | parameters. The parameters include: |
|
2027 | 2024 | - url: the url to the bundle10. |
|
2028 | 2025 | 	    - size: the bundle10 file size. It is used to validate that what was

2029 | 2026 | 	      retrieved by the client matches the server's knowledge about the bundle.
|
2030 | 2027 | - digests: a space separated list of the digest types provided as |
|
2031 | 2028 | parameters. |
|
2032 | 2029 | - digest:<digest-type>: the hexadecimal representation of the digest with |
|
2033 | 2030 | 	      that name. Like the size, it is used to validate that what was retrieved

2034 | 2031 | 	      by the client matches what the server knows about the bundle.
|
2035 | 2032 | |
|
2036 | 2033 | When multiple digest types are given, all of them are checked. |
|
2037 | 2034 | """ |
|
2038 | 2035 | try: |
|
2039 | 2036 | raw_url = inpart.params[b'url'] |
|
2040 | 2037 | except KeyError: |
|
2041 | 2038 | raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url') |
|
2042 | 2039 | parsed_url = util.url(raw_url) |
|
2043 | 2040 | if parsed_url.scheme not in capabilities[b'remote-changegroup']: |
|
2044 | 2041 | raise error.Abort( |
|
2045 | 2042 | _(b'remote-changegroup does not support %s urls') |
|
2046 | 2043 | % parsed_url.scheme |
|
2047 | 2044 | ) |
|
2048 | 2045 | |
|
2049 | 2046 | try: |
|
2050 | 2047 | size = int(inpart.params[b'size']) |
|
2051 | 2048 | except ValueError: |
|
2052 | 2049 | raise error.Abort( |
|
2053 | 2050 | _(b'remote-changegroup: invalid value for param "%s"') % b'size' |
|
2054 | 2051 | ) |
|
2055 | 2052 | except KeyError: |
|
2056 | 2053 | raise error.Abort( |
|
2057 | 2054 | _(b'remote-changegroup: missing "%s" param') % b'size' |
|
2058 | 2055 | ) |
|
2059 | 2056 | |
|
2060 | 2057 | digests = {} |
|
2061 | 2058 | for typ in inpart.params.get(b'digests', b'').split(): |
|
2062 | 2059 | param = b'digest:%s' % typ |
|
2063 | 2060 | try: |
|
2064 | 2061 | value = inpart.params[param] |
|
2065 | 2062 | except KeyError: |
|
2066 | 2063 | raise error.Abort( |
|
2067 | 2064 | _(b'remote-changegroup: missing "%s" param') % param |
|
2068 | 2065 | ) |
|
2069 | 2066 | digests[typ] = value |
|
2070 | 2067 | |
|
2071 | 2068 | real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests) |
|
2072 | 2069 | |
|
2073 | 2070 | tr = op.gettransaction() |
|
2074 | 2071 | from . import exchange |
|
2075 | 2072 | |
|
2076 | 2073 | cg = exchange.readbundle(op.repo.ui, real_part, raw_url) |
|
2077 | 2074 | if not isinstance(cg, changegroup.cg1unpacker): |
|
2078 | 2075 | raise error.Abort( |
|
2079 | 2076 | _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url) |
|
2080 | 2077 | ) |
|
2081 | 2078 | ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2') |
|
2082 | 2079 | if op.reply is not None: |
|
2083 | 2080 | # This is definitely not the final form of this |
|
2084 | 2081 | 	        # return. But one needs to start somewhere.
|
2085 | 2082 | part = op.reply.newpart(b'reply:changegroup') |
|
2086 | 2083 | part.addparam( |
|
2087 | 2084 | b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False |
|
2088 | 2085 | ) |
|
2089 | 2086 | part.addparam(b'return', b'%i' % ret, mandatory=False) |
|
2090 | 2087 | try: |
|
2091 | 2088 | real_part.validate() |
|
2092 | 2089 | except error.Abort as e: |
|
2093 | 2090 | raise error.Abort( |
|
2094 | 2091 | _(b'bundle at %s is corrupted:\n%s') |
|
2095 | 2092 | % (util.hidepassword(raw_url), bytes(e)) |
|
2096 | 2093 | ) |
|
2097 | 2094 | assert not inpart.read() |
|
2098 | 2095 | |
|
2099 | 2096 | |
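A minimal sketch of the size/digest validation util.digestchecker performs for the part above, written against hashlib directly (an assumption; the real helper validates incrementally while reading):

import hashlib

payload = b'bundle-bytes'                              # hypothetical bundle10 data
digests = {'sha1': hashlib.sha1(payload).hexdigest()}  # as sent in digest:sha1

def validate(data, size, digests):
    if len(data) != size:
        raise ValueError('size mismatch')
    for typ, want in digests.items():
        if hashlib.new(typ, data).hexdigest() != want:
            raise ValueError('%s mismatch' % typ)

validate(payload, len(payload), digests)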
|
2100 | 2097 | @parthandler(b'reply:changegroup', (b'return', b'in-reply-to')) |
|
2101 | 2098 | def handlereplychangegroup(op, inpart): |
|
2102 | 2099 | ret = int(inpart.params[b'return']) |
|
2103 | 2100 | replyto = int(inpart.params[b'in-reply-to']) |
|
2104 | 2101 | op.records.add(b'changegroup', {b'return': ret}, replyto) |
|
2105 | 2102 | |
|
2106 | 2103 | |
|
2107 | 2104 | @parthandler(b'check:bookmarks') |
|
2108 | 2105 | def handlecheckbookmarks(op, inpart): |
|
2109 | 2106 | """check location of bookmarks |
|
2110 | 2107 | |
|
2111 | 2108 | 	    This part is used to detect push races regarding bookmarks; it

2112 | 2109 | 	    contains binary encoded (bookmark, node) tuples. If the local state does

2113 | 2110 | 	    not match the one in the part, a PushRaced exception is raised
|
2114 | 2111 | """ |
|
2115 | 2112 | bookdata = bookmarks.binarydecode(inpart) |
|
2116 | 2113 | |
|
2117 | 2114 | msgstandard = ( |
|
2118 | 2115 | b'remote repository changed while pushing - please try again ' |
|
2119 | 2116 | b'(bookmark "%s" move from %s to %s)' |
|
2120 | 2117 | ) |
|
2121 | 2118 | msgmissing = ( |
|
2122 | 2119 | b'remote repository changed while pushing - please try again ' |
|
2123 | 2120 | b'(bookmark "%s" is missing, expected %s)' |
|
2124 | 2121 | ) |
|
2125 | 2122 | msgexist = ( |
|
2126 | 2123 | b'remote repository changed while pushing - please try again ' |
|
2127 | 2124 | b'(bookmark "%s" set on %s, expected missing)' |
|
2128 | 2125 | ) |
|
2129 | 2126 | for book, node in bookdata: |
|
2130 | 2127 | currentnode = op.repo._bookmarks.get(book) |
|
2131 | 2128 | if currentnode != node: |
|
2132 | 2129 | if node is None: |
|
2133 | 2130 | finalmsg = msgexist % (book, nodemod.short(currentnode)) |
|
2134 | 2131 | elif currentnode is None: |
|
2135 | 2132 | finalmsg = msgmissing % (book, nodemod.short(node)) |
|
2136 | 2133 | else: |
|
2137 | 2134 | finalmsg = msgstandard % ( |
|
2138 | 2135 | book, |
|
2139 | 2136 | nodemod.short(node), |
|
2140 | 2137 | nodemod.short(currentnode), |
|
2141 | 2138 | ) |
|
2142 | 2139 | raise error.PushRaced(finalmsg) |
|
2143 | 2140 | |
|
2144 | 2141 | |
|
2145 | 2142 | @parthandler(b'check:heads') |
|
2146 | 2143 | def handlecheckheads(op, inpart): |
|
2147 | 2144 | """check that head of the repo did not change |
|
2148 | 2145 | |
|
2149 | 2146 | This is used to detect a push race when using unbundle. |
|
2150 | 2147 | This replaces the "heads" argument of unbundle.""" |
|
2151 | 2148 | h = inpart.read(20) |
|
2152 | 2149 | heads = [] |
|
2153 | 2150 | while len(h) == 20: |
|
2154 | 2151 | heads.append(h) |
|
2155 | 2152 | h = inpart.read(20) |
|
2156 | 2153 | assert not h |
|
2157 | 2154 | # Trigger a transaction so that we are guaranteed to have the lock now. |
|
2158 | 2155 | if op.ui.configbool(b'experimental', b'bundle2lazylocking'): |
|
2159 | 2156 | op.gettransaction() |
|
2160 | 2157 | if sorted(heads) != sorted(op.repo.heads()): |
|
2161 | 2158 | raise error.PushRaced( |
|
2162 | 2159 | b'remote repository changed while pushing - please try again' |
|
2163 | 2160 | ) |
|
2164 | 2161 | |
|
2165 | 2162 | |
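The 'check:heads' payload consumed above is nothing but raw 20-byte head nodes back to back; a minimal sketch with hypothetical nodes:

heads = [b'\x01' * 20, b'\x02' * 20]  # hypothetical head nodes
payload = b''.join(heads)

recovered, pos = [], 0
while pos < len(payload):               # the handler reads 20 bytes at a time
    recovered.append(payload[pos:pos + 20])
    pos += 20
assert recovered == heads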
|
2166 | 2163 | @parthandler(b'check:updated-heads') |
|
2167 | 2164 | def handlecheckupdatedheads(op, inpart): |
|
2168 | 2165 | """check for race on the heads touched by a push |
|
2169 | 2166 | |
|
2170 | 2167 | 	    This is similar to 'check:heads' but focuses on the heads actually updated
|
2171 | 2168 | during the push. If other activities happen on unrelated heads, it is |
|
2172 | 2169 | ignored. |
|
2173 | 2170 | |
|
2174 | 2171 | 	    This allows servers with high traffic to avoid push contention as long

2175 | 2172 | 	    as only unrelated parts of the graph are involved."""
|
2176 | 2173 | h = inpart.read(20) |
|
2177 | 2174 | heads = [] |
|
2178 | 2175 | while len(h) == 20: |
|
2179 | 2176 | heads.append(h) |
|
2180 | 2177 | h = inpart.read(20) |
|
2181 | 2178 | assert not h |
|
2182 | 2179 | # trigger a transaction so that we are guaranteed to have the lock now. |
|
2183 | 2180 | if op.ui.configbool(b'experimental', b'bundle2lazylocking'): |
|
2184 | 2181 | op.gettransaction() |
|
2185 | 2182 | |
|
2186 | 2183 | currentheads = set() |
|
2187 | 2184 | for ls in op.repo.branchmap().iterheads(): |
|
2188 | 2185 | currentheads.update(ls) |
|
2189 | 2186 | |
|
2190 | 2187 | for h in heads: |
|
2191 | 2188 | if h not in currentheads: |
|
2192 | 2189 | raise error.PushRaced( |
|
2193 | 2190 | b'remote repository changed while pushing - ' |
|
2194 | 2191 | b'please try again' |
|
2195 | 2192 | ) |
|
2196 | 2193 | |
|
2197 | 2194 | |
|
2198 | 2195 | @parthandler(b'check:phases') |
|
2199 | 2196 | def handlecheckphases(op, inpart): |
|
2200 | 2197 | """check that phase boundaries of the repository did not change |
|
2201 | 2198 | |
|
2202 | 2199 | This is used to detect a push race. |
|
2203 | 2200 | """ |
|
2204 | 2201 | phasetonodes = phases.binarydecode(inpart) |
|
2205 | 2202 | unfi = op.repo.unfiltered() |
|
2206 | 2203 | cl = unfi.changelog |
|
2207 | 2204 | phasecache = unfi._phasecache |
|
2208 | 2205 | msg = ( |
|
2209 | 2206 | b'remote repository changed while pushing - please try again ' |
|
2210 | 2207 | b'(%s is %s expected %s)' |
|
2211 | 2208 | ) |
|
2212 | 2209 | for expectedphase, nodes in pycompat.iteritems(phasetonodes): |
|
2213 | 2210 | for n in nodes: |
|
2214 | 2211 | actualphase = phasecache.phase(unfi, cl.rev(n)) |
|
2215 | 2212 | if actualphase != expectedphase: |
|
2216 | 2213 | finalmsg = msg % ( |
|
2217 | 2214 | nodemod.short(n), |
|
2218 | 2215 | phases.phasenames[actualphase], |
|
2219 | 2216 | phases.phasenames[expectedphase], |
|
2220 | 2217 | ) |
|
2221 | 2218 | raise error.PushRaced(finalmsg) |
|
2222 | 2219 | |
|
2223 | 2220 | |
|
2224 | 2221 | @parthandler(b'output') |
|
2225 | 2222 | def handleoutput(op, inpart): |
|
2226 | 2223 | """forward output captured on the server to the client""" |
|
2227 | 2224 | for line in inpart.read().splitlines(): |
|
2228 | 2225 | op.ui.status(_(b'remote: %s\n') % line) |
|
2229 | 2226 | |
|
2230 | 2227 | |
|
2231 | 2228 | @parthandler(b'replycaps') |
|
2232 | 2229 | def handlereplycaps(op, inpart): |
|
2233 | 2230 | """Notify that a reply bundle should be created |
|
2234 | 2231 | |
|
2235 | 2232 | The payload contains the capabilities information for the reply""" |
|
2236 | 2233 | caps = decodecaps(inpart.read()) |
|
2237 | 2234 | if op.reply is None: |
|
2238 | 2235 | op.reply = bundle20(op.ui, caps) |
|
2239 | 2236 | |
|
2240 | 2237 | |
|
2241 | 2238 | class AbortFromPart(error.Abort): |
|
2242 | 2239 | """Sub-class of Abort that denotes an error from a bundle2 part.""" |
|
2243 | 2240 | |
|
2244 | 2241 | |
|
2245 | 2242 | @parthandler(b'error:abort', (b'message', b'hint')) |
|
2246 | 2243 | def handleerrorabort(op, inpart): |
|
2247 | 2244 | """Used to transmit abort error over the wire""" |
|
2248 | 2245 | raise AbortFromPart( |
|
2249 | 2246 | inpart.params[b'message'], hint=inpart.params.get(b'hint') |
|
2250 | 2247 | ) |
|
2251 | 2248 | |
|
2252 | 2249 | |
|
2253 | 2250 | @parthandler( |
|
2254 | 2251 | b'error:pushkey', |
|
2255 | 2252 | (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'), |
|
2256 | 2253 | ) |
|
2257 | 2254 | def handleerrorpushkey(op, inpart): |
|
2258 | 2255 | """Used to transmit failure of a mandatory pushkey over the wire""" |
|
2259 | 2256 | kwargs = {} |
|
2260 | 2257 | for name in (b'namespace', b'key', b'new', b'old', b'ret'): |
|
2261 | 2258 | value = inpart.params.get(name) |
|
2262 | 2259 | if value is not None: |
|
2263 | 2260 | kwargs[name] = value |
|
2264 | 2261 | raise error.PushkeyFailed( |
|
2265 | 2262 | inpart.params[b'in-reply-to'], **pycompat.strkwargs(kwargs) |
|
2266 | 2263 | ) |
|
2267 | 2264 | |
|
2268 | 2265 | |
|
2269 | 2266 | @parthandler(b'error:unsupportedcontent', (b'parttype', b'params')) |
|
2270 | 2267 | def handleerrorunsupportedcontent(op, inpart): |
|
2271 | 2268 | """Used to transmit unknown content error over the wire""" |
|
2272 | 2269 | kwargs = {} |
|
2273 | 2270 | parttype = inpart.params.get(b'parttype') |
|
2274 | 2271 | if parttype is not None: |
|
2275 | 2272 | kwargs[b'parttype'] = parttype |
|
2276 | 2273 | params = inpart.params.get(b'params') |
|
2277 | 2274 | if params is not None: |
|
2278 | 2275 | kwargs[b'params'] = params.split(b'\0') |
|
2279 | 2276 | |
|
2280 | 2277 | raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs)) |
|
2281 | 2278 | |
|
2282 | 2279 | |
|
2283 | 2280 | @parthandler(b'error:pushraced', (b'message',)) |
|
2284 | 2281 | def handleerrorpushraced(op, inpart): |
|
2285 | 2282 | """Used to transmit push race error over the wire""" |
|
2286 | 2283 | raise error.ResponseError(_(b'push failed:'), inpart.params[b'message']) |
|
2287 | 2284 | |
|
2288 | 2285 | |
|
2289 | 2286 | @parthandler(b'listkeys', (b'namespace',)) |
|
2290 | 2287 | def handlelistkeys(op, inpart): |
|
2291 | 2288 | """retrieve pushkey namespace content stored in a bundle2""" |
|
2292 | 2289 | namespace = inpart.params[b'namespace'] |
|
2293 | 2290 | r = pushkey.decodekeys(inpart.read()) |
|
2294 | 2291 | op.records.add(b'listkeys', (namespace, r)) |
|
2295 | 2292 | |
|
2296 | 2293 | |
|
2297 | 2294 | @parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new')) |
|
2298 | 2295 | def handlepushkey(op, inpart): |
|
2299 | 2296 | """process a pushkey request""" |
|
2300 | 2297 | dec = pushkey.decode |
|
2301 | 2298 | namespace = dec(inpart.params[b'namespace']) |
|
2302 | 2299 | key = dec(inpart.params[b'key']) |
|
2303 | 2300 | old = dec(inpart.params[b'old']) |
|
2304 | 2301 | new = dec(inpart.params[b'new']) |
|
2305 | 2302 | # Grab the transaction to ensure that we have the lock before performing the |
|
2306 | 2303 | # pushkey. |
|
2307 | 2304 | if op.ui.configbool(b'experimental', b'bundle2lazylocking'): |
|
2308 | 2305 | op.gettransaction() |
|
2309 | 2306 | ret = op.repo.pushkey(namespace, key, old, new) |
|
2310 | 2307 | record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new} |
|
2311 | 2308 | op.records.add(b'pushkey', record) |
|
2312 | 2309 | if op.reply is not None: |
|
2313 | 2310 | rpart = op.reply.newpart(b'reply:pushkey') |
|
2314 | 2311 | rpart.addparam( |
|
2315 | 2312 | b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False |
|
2316 | 2313 | ) |
|
2317 | 2314 | rpart.addparam(b'return', b'%i' % ret, mandatory=False) |
|
2318 | 2315 | if inpart.mandatory and not ret: |
|
2319 | 2316 | kwargs = {} |
|
2320 | 2317 | for key in (b'namespace', b'key', b'new', b'old', b'ret'): |
|
2321 | 2318 | if key in inpart.params: |
|
2322 | 2319 | kwargs[key] = inpart.params[key] |
|
2323 | 2320 | raise error.PushkeyFailed( |
|
2324 | 2321 | partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs) |
|
2325 | 2322 | ) |
|
2326 | 2323 | |
|
2327 | 2324 | |
|
2328 | 2325 | @parthandler(b'bookmarks') |
|
2329 | 2326 | def handlebookmark(op, inpart): |
|
2330 | 2327 | """transmit bookmark information |
|
2331 | 2328 | |
|
2332 | 2329 | The part contains binary encoded bookmark information. |
|
2333 | 2330 | |
|
2334 | 2331 | The exact behavior of this part can be controlled by the 'bookmarks' mode |
|
2335 | 2332 | on the bundle operation. |
|
2336 | 2333 | |
|
2337 | 2334 | When mode is 'apply' (the default) the bookmark information is applied as |
|
2338 | 2335 | is to the unbundling repository. Make sure a 'check:bookmarks' part is |
|
2339 | 2336 | 	    issued earlier to check for push races in such an update. This behavior is
|
2340 | 2337 | suitable for pushing. |
|
2341 | 2338 | |
|
2342 | 2339 | When mode is 'records', the information is recorded into the 'bookmarks' |
|
2343 | 2340 | records of the bundle operation. This behavior is suitable for pulling. |
|
2344 | 2341 | """ |
|
2345 | 2342 | changes = bookmarks.binarydecode(inpart) |
|
2346 | 2343 | |
|
2347 | 2344 | pushkeycompat = op.repo.ui.configbool( |
|
2348 | 2345 | b'server', b'bookmarks-pushkey-compat' |
|
2349 | 2346 | ) |
|
2350 | 2347 | bookmarksmode = op.modes.get(b'bookmarks', b'apply') |
|
2351 | 2348 | |
|
2352 | 2349 | if bookmarksmode == b'apply': |
|
2353 | 2350 | tr = op.gettransaction() |
|
2354 | 2351 | bookstore = op.repo._bookmarks |
|
2355 | 2352 | if pushkeycompat: |
|
2356 | 2353 | allhooks = [] |
|
2357 | 2354 | for book, node in changes: |
|
2358 | 2355 | hookargs = tr.hookargs.copy() |
|
2359 | 2356 | hookargs[b'pushkeycompat'] = b'1' |
|
2360 | 2357 | hookargs[b'namespace'] = b'bookmarks' |
|
2361 | 2358 | hookargs[b'key'] = book |
|
2362 | 2359 | hookargs[b'old'] = nodemod.hex(bookstore.get(book, b'')) |
|
2363 | 2360 | hookargs[b'new'] = nodemod.hex( |
|
2364 | 2361 | node if node is not None else b'' |
|
2365 | 2362 | ) |
|
2366 | 2363 | allhooks.append(hookargs) |
|
2367 | 2364 | |
|
2368 | 2365 | for hookargs in allhooks: |
|
2369 | 2366 | op.repo.hook( |
|
2370 | 2367 | b'prepushkey', throw=True, **pycompat.strkwargs(hookargs) |
|
2371 | 2368 | ) |
|
2372 | 2369 | |
|
2373 | 2370 | for book, node in changes: |
|
2374 | 2371 | if bookmarks.isdivergent(book): |
|
2375 | 2372 | msg = _(b'cannot accept divergent bookmark %s!') % book |
|
2376 | 2373 | raise error.Abort(msg) |
|
2377 | 2374 | |
|
2378 | 2375 | bookstore.applychanges(op.repo, op.gettransaction(), changes) |
|
2379 | 2376 | |
|
2380 | 2377 | if pushkeycompat: |
|
2381 | 2378 | |
|
2382 | 2379 | def runhook(unused_success): |
|
2383 | 2380 | for hookargs in allhooks: |
|
2384 | 2381 | op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs)) |
|
2385 | 2382 | |
|
2386 | 2383 | op.repo._afterlock(runhook) |
|
2387 | 2384 | |
|
2388 | 2385 | elif bookmarksmode == b'records': |
|
2389 | 2386 | for book, node in changes: |
|
2390 | 2387 | record = {b'bookmark': book, b'node': node} |
|
2391 | 2388 | op.records.add(b'bookmarks', record) |
|
2392 | 2389 | else: |
|
2393 | 2390 | raise error.ProgrammingError( |
|
2394 | 2391 | 	            b'unknown bookmark mode: %s' % bookmarksmode
|
2395 | 2392 | ) |
|
2396 | 2393 | |
|
2397 | 2394 | |
|
2398 | 2395 | @parthandler(b'phase-heads') |
|
2399 | 2396 | def handlephases(op, inpart): |
|
2400 | 2397 | """apply phases from bundle part to repo""" |
|
2401 | 2398 | headsbyphase = phases.binarydecode(inpart) |
|
2402 | 2399 | phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase) |
|
2403 | 2400 | |
|
2404 | 2401 | |
|
2405 | 2402 | @parthandler(b'reply:pushkey', (b'return', b'in-reply-to')) |
|
2406 | 2403 | def handlepushkeyreply(op, inpart): |
|
2407 | 2404 | """retrieve the result of a pushkey request""" |
|
2408 | 2405 | ret = int(inpart.params[b'return']) |
|
2409 | 2406 | partid = int(inpart.params[b'in-reply-to']) |
|
2410 | 2407 | op.records.add(b'pushkey', {b'return': ret}, partid) |
|
2411 | 2408 | |
|
2412 | 2409 | |
|
2413 | 2410 | @parthandler(b'obsmarkers') |
|
2414 | 2411 | def handleobsmarker(op, inpart): |
|
2415 | 2412 | """add a stream of obsmarkers to the repo""" |
|
2416 | 2413 | tr = op.gettransaction() |
|
2417 | 2414 | markerdata = inpart.read() |
|
2418 | 2415 | if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'): |
|
2419 | 2416 | op.ui.writenoi18n( |
|
2420 | 2417 | b'obsmarker-exchange: %i bytes received\n' % len(markerdata) |
|
2421 | 2418 | ) |
|
2422 | 2419 | # The mergemarkers call will crash if marker creation is not enabled. |
|
2423 | 2420 | # we want to avoid this if the part is advisory. |
|
2424 | 2421 | if not inpart.mandatory and op.repo.obsstore.readonly: |
|
2425 | 2422 | op.repo.ui.debug( |
|
2426 | 2423 | b'ignoring obsolescence markers, feature not enabled\n' |
|
2427 | 2424 | ) |
|
2428 | 2425 | return |
|
2429 | 2426 | new = op.repo.obsstore.mergemarkers(tr, markerdata) |
|
2430 | 2427 | op.repo.invalidatevolatilesets() |
|
2431 | 2428 | op.records.add(b'obsmarkers', {b'new': new}) |
|
2432 | 2429 | if op.reply is not None: |
|
2433 | 2430 | rpart = op.reply.newpart(b'reply:obsmarkers') |
|
2434 | 2431 | rpart.addparam( |
|
2435 | 2432 | b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False |
|
2436 | 2433 | ) |
|
2437 | 2434 | rpart.addparam(b'new', b'%i' % new, mandatory=False) |
|
2438 | 2435 | |
|
2439 | 2436 | |
|
2440 | 2437 | @parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to')) |
|
2441 | 2438 | def handleobsmarkerreply(op, inpart): |
|
2442 | 2439 | 	    """retrieve the result of an obsmarkers request"""
|
2443 | 2440 | ret = int(inpart.params[b'new']) |
|
2444 | 2441 | partid = int(inpart.params[b'in-reply-to']) |
|
2445 | 2442 | op.records.add(b'obsmarkers', {b'new': ret}, partid) |
|
2446 | 2443 | |
|
2447 | 2444 | |
|
2448 | 2445 | @parthandler(b'hgtagsfnodes') |
|
2449 | 2446 | def handlehgtagsfnodes(op, inpart): |
|
2450 | 2447 | """Applies .hgtags fnodes cache entries to the local repo. |
|
2451 | 2448 | |
|
2452 | 2449 | Payload is pairs of 20 byte changeset nodes and filenodes. |
|
2453 | 2450 | """ |
|
2454 | 2451 | # Grab the transaction so we ensure that we have the lock at this point. |
|
2455 | 2452 | if op.ui.configbool(b'experimental', b'bundle2lazylocking'): |
|
2456 | 2453 | op.gettransaction() |
|
2457 | 2454 | cache = tags.hgtagsfnodescache(op.repo.unfiltered()) |
|
2458 | 2455 | |
|
2459 | 2456 | count = 0 |
|
2460 | 2457 | while True: |
|
2461 | 2458 | node = inpart.read(20) |
|
2462 | 2459 | fnode = inpart.read(20) |
|
2463 | 2460 | if len(node) < 20 or len(fnode) < 20: |
|
2464 | 2461 | op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n') |
|
2465 | 2462 | break |
|
2466 | 2463 | cache.setfnode(node, fnode) |
|
2467 | 2464 | count += 1 |
|
2468 | 2465 | |
|
2469 | 2466 | cache.write() |
|
2470 | 2467 | op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count) |
|
2471 | 2468 | |
|
2472 | 2469 | |
|
2473 | 2470 | rbcstruct = struct.Struct(b'>III') |
|
2474 | 2471 | |
|
2475 | 2472 | |
|
2476 | 2473 | @parthandler(b'cache:rev-branch-cache') |
|
2477 | 2474 | def handlerbc(op, inpart): |
|
2478 | 2475 | """receive a rev-branch-cache payload and update the local cache |
|
2479 | 2476 | |
|
2480 | 2477 | 	    The payload is a series of records, one per branch:
|
2481 | 2478 | |
|
2482 | 2479 | 1) branch name length |
|
2483 | 2480 | 2) number of open heads |
|
2484 | 2481 | 3) number of closed heads |
|
2485 | 2482 | 4) open heads nodes |
|
2486 | 2483 | 5) closed heads nodes |
|
2487 | 2484 | """ |
|
2488 | 2485 | total = 0 |
|
2489 | 2486 | rawheader = inpart.read(rbcstruct.size) |
|
2490 | 2487 | cache = op.repo.revbranchcache() |
|
2491 | 2488 | cl = op.repo.unfiltered().changelog |
|
2492 | 2489 | while rawheader: |
|
2493 | 2490 | header = rbcstruct.unpack(rawheader) |
|
2494 | 2491 | total += header[1] + header[2] |
|
2495 | 2492 | utf8branch = inpart.read(header[0]) |
|
2496 | 2493 | branch = encoding.tolocal(utf8branch) |
|
2497 | 2494 | for x in pycompat.xrange(header[1]): |
|
2498 | 2495 | node = inpart.read(20) |
|
2499 | 2496 | rev = cl.rev(node) |
|
2500 | 2497 | cache.setdata(branch, rev, node, False) |
|
2501 | 2498 | for x in pycompat.xrange(header[2]): |
|
2502 | 2499 | node = inpart.read(20) |
|
2503 | 2500 | rev = cl.rev(node) |
|
2504 | 2501 | cache.setdata(branch, rev, node, True) |
|
2505 | 2502 | rawheader = inpart.read(rbcstruct.size) |
|
2506 | 2503 | cache.write() |
|
2507 | 2504 | |
|
2508 | 2505 | |
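Decoding sketch for one record of the payload enumerated in the docstring above, mirroring the encoder shown earlier in this diff (hypothetical data):

import struct

rbcstruct = struct.Struct('>III')
branch, openh, closedh = b'stable', [b'\x0a' * 20], [b'\x0b' * 20]
blob = (rbcstruct.pack(len(branch), len(openh), len(closedh))
        + branch + b''.join(openh) + b''.join(closedh))

namelen, nopen, nclosed = rbcstruct.unpack(blob[:rbcstruct.size])
pos = rbcstruct.size
assert blob[pos:pos + namelen] == branch
pos += namelen
assert blob[pos:pos + 20 * nopen] == openh[0]      # open head nodes
pos += 20 * nopen
assert blob[pos:pos + 20 * nclosed] == closedh[0]  # closed head nodes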
|
2509 | 2506 | @parthandler(b'pushvars') |
|
2510 | 2507 | def bundle2getvars(op, part): |
|
2511 | 2508 | '''unbundle a bundle2 containing shellvars on the server''' |
|
2512 | 2509 | # An option to disable unbundling on server-side for security reasons |
|
2513 | 2510 | if op.ui.configbool(b'push', b'pushvars.server'): |
|
2514 | 2511 | hookargs = {} |
|
2515 | 2512 | for key, value in part.advisoryparams: |
|
2516 | 2513 | key = key.upper() |
|
2517 | 2514 | # We want pushed variables to have USERVAR_ prepended so we know |
|
2518 | 2515 | # they came from the --pushvar flag. |
|
2519 | 2516 | key = b"USERVAR_" + key |
|
2520 | 2517 | hookargs[key] = value |
|
2521 | 2518 | op.addhookargs(hookargs) |
|
2522 | 2519 | |
|
2523 | 2520 | |
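Sketch of the variable-to-hook-argument mapping above, with hypothetical advisory params; names are upper-cased and prefixed so hooks can recognize --pushvar input:

advisoryparams = [(b'debug', b'1'), (b'bypass_review', b'true')]  # hypothetical
hookargs = {b'USERVAR_' + k.upper(): v for k, v in advisoryparams}
assert hookargs[b'USERVAR_DEBUG'] == b'1'
assert hookargs[b'USERVAR_BYPASS_REVIEW'] == b'true'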
|
2524 | 2521 | @parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount')) |
|
2525 | 2522 | def handlestreamv2bundle(op, part): |
|
2526 | 2523 | |
|
2527 | 2524 | requirements = urlreq.unquote(part.params[b'requirements']).split(b',') |
|
2528 | 2525 | filecount = int(part.params[b'filecount']) |
|
2529 | 2526 | bytecount = int(part.params[b'bytecount']) |
|
2530 | 2527 | |
|
2531 | 2528 | repo = op.repo |
|
2532 | 2529 | if len(repo): |
|
2533 | 2530 | msg = _(b'cannot apply stream clone to non empty repository') |
|
2534 | 2531 | raise error.Abort(msg) |
|
2535 | 2532 | |
|
2536 | 2533 | repo.ui.debug(b'applying stream bundle\n') |
|
2537 | 2534 | streamclone.applybundlev2(repo, part, filecount, bytecount, requirements) |
|
2538 | 2535 | |
|
2539 | 2536 | |
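A minimal round-trip sketch of the three 'stream2' parameters handled above, assuming urlreq behaves like urllib.parse (values are hypothetical):

from urllib.parse import quote, unquote

params = {
    'requirements': quote('generaldelta,revlogv1,store'),
    'filecount': '3',
    'bytecount': '12345',
}
requirements = unquote(params['requirements']).split(',')
assert requirements == ['generaldelta', 'revlogv1', 'store']
assert (int(params['filecount']), int(params['bytecount'])) == (3, 12345)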
|
2540 | 2537 | def widen_bundle( |
|
2541 | 2538 | bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses |
|
2542 | 2539 | ): |
|
2543 | 2540 | """generates bundle2 for widening a narrow clone |
|
2544 | 2541 | |
|
2545 | 2542 | bundler is the bundle to which data should be added |
|
2546 | 2543 | repo is the localrepository instance |
|
2547 | 2544 | oldmatcher matches what the client already has |
|
2548 | 2545 | newmatcher matches what the client needs (including what it already has) |
|
2549 | 2546 | common is set of common heads between server and client |
|
2550 | 2547 | known is a set of revs known on the client side (used in ellipses) |
|
2551 | 2548 | cgversion is the changegroup version to send |
|
2552 | 2549 | ellipses is boolean value telling whether to send ellipses data or not |
|
2553 | 2550 | |
|
2554 | 2551 | returns bundle2 of the data required for extending |
|
2555 | 2552 | """ |
|
2556 | 2553 | commonnodes = set() |
|
2557 | 2554 | cl = repo.changelog |
|
2558 | 2555 | for r in repo.revs(b"::%ln", common): |
|
2559 | 2556 | commonnodes.add(cl.node(r)) |
|
2560 | 2557 | if commonnodes: |
|
2561 | 2558 | 	        # XXX: we should only send the filelogs (and treemanifest). The user
|
2562 | 2559 | # already has the changelog and manifest |
|
2563 | 2560 | packer = changegroup.getbundler( |
|
2564 | 2561 | cgversion, |
|
2565 | 2562 | repo, |
|
2566 | 2563 | oldmatcher=oldmatcher, |
|
2567 | 2564 | matcher=newmatcher, |
|
2568 | 2565 | fullnodes=commonnodes, |
|
2569 | 2566 | ) |
|
2570 | 2567 | cgdata = packer.generate( |
|
2571 | 2568 | {nodemod.nullid}, |
|
2572 | 2569 | list(commonnodes), |
|
2573 | 2570 | False, |
|
2574 | 2571 | b'narrow_widen', |
|
2575 | 2572 | changelog=False, |
|
2576 | 2573 | ) |
|
2577 | 2574 | |
|
2578 | 2575 | part = bundler.newpart(b'changegroup', data=cgdata) |
|
2579 | 2576 | part.addparam(b'version', cgversion) |
|
2580 | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:

2577 | if scmutil.istreemanifest(repo):
|
2581 | 2578 | part.addparam(b'treemanifest', b'1') |
|
2582 | 2579 | if b'exp-sidedata-flag' in repo.requirements: |
|
2583 | 2580 | part.addparam(b'exp-sidedata', b'1') |
|
2584 | 2581 | |
|
2585 | 2582 | return bundler |
@@ -1,1690 +1,1689 @@
|
1 | 1 | # changegroup.py - Mercurial changegroup manipulation functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import os |
|
11 | 11 | import struct |
|
12 | 12 | import weakref |
|
13 | 13 | |
|
14 | 14 | from .i18n import _ |
|
15 | 15 | from .node import ( |
|
16 | 16 | hex, |
|
17 | 17 | nullid, |
|
18 | 18 | nullrev, |
|
19 | 19 | short, |
|
20 | 20 | ) |
|
21 | 21 | from .pycompat import open |
|
22 | 22 | |
|
23 | 23 | from . import ( |
|
24 | 24 | error, |
|
25 | 25 | match as matchmod, |
|
26 | 26 | mdiff, |
|
27 | 27 | phases, |
|
28 | 28 | pycompat, |
|
29 | 29 | requirements, |
|
30 | scmutil, | |
|
30 | 31 | util, |
|
31 | 32 | ) |
|
32 | 33 | |
|
33 | 34 | from .interfaces import repository |
|
34 | 35 | |
|
35 | 36 | _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s") |
|
36 | 37 | _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s") |
|
37 | 38 | _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH") |
|
38 | 39 | |
|
39 | 40 | LFS_REQUIREMENT = b'lfs' |
|
40 | 41 | |
|
41 | 42 | readexactly = util.readexactly |
|
42 | 43 | |
|
43 | 44 | |
|
44 | 45 | def getchunk(stream): |
|
45 | 46 | """return the next chunk from stream as a string""" |
|
46 | 47 | d = readexactly(stream, 4) |
|
47 | 48 | l = struct.unpack(b">l", d)[0] |
|
48 | 49 | if l <= 4: |
|
49 | 50 | if l: |
|
50 | 51 | raise error.Abort(_(b"invalid chunk length %d") % l) |
|
51 | 52 | return b"" |
|
52 | 53 | return readexactly(stream, l - 4) |
|
53 | 54 | |
|
54 | 55 | |
|
55 | 56 | def chunkheader(length): |
|
56 | 57 | """return a changegroup chunk header (string)""" |
|
57 | 58 | return struct.pack(b">l", length + 4) |
|
58 | 59 | |
|
59 | 60 | |
|
60 | 61 | def closechunk(): |
|
61 | 62 | """return a changegroup chunk header (string) for a zero-length chunk""" |
|
62 | 63 | return struct.pack(b">l", 0) |
|
63 | 64 | |
|
64 | 65 | |
|
65 | 66 | def _fileheader(path): |
|
66 | 67 | """Obtain a changegroup chunk header for a named path.""" |
|
67 | 68 | return chunkheader(len(path)) + path |
|
68 | 69 | |
|
69 | 70 | |
|
70 | 71 | def writechunks(ui, chunks, filename, vfs=None): |
|
71 | 72 | """Write chunks to a file and return its filename. |
|
72 | 73 | |
|
73 | 74 | The stream is assumed to be a bundle file. |
|
74 | 75 | Existing files will not be overwritten. |
|
75 | 76 | If no filename is specified, a temporary file is created. |
|
76 | 77 | """ |
|
77 | 78 | fh = None |
|
78 | 79 | cleanup = None |
|
79 | 80 | try: |
|
80 | 81 | if filename: |
|
81 | 82 | if vfs: |
|
82 | 83 | fh = vfs.open(filename, b"wb") |
|
83 | 84 | else: |
|
84 | 85 | # Increase default buffer size because default is usually |
|
85 | 86 | # small (4k is common on Linux). |
|
86 | 87 | fh = open(filename, b"wb", 131072) |
|
87 | 88 | else: |
|
88 | 89 | fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg") |
|
89 | 90 | fh = os.fdopen(fd, "wb") |
|
90 | 91 | cleanup = filename |
|
91 | 92 | for c in chunks: |
|
92 | 93 | fh.write(c) |
|
93 | 94 | cleanup = None |
|
94 | 95 | return filename |
|
95 | 96 | finally: |
|
96 | 97 | if fh is not None: |
|
97 | 98 | fh.close() |
|
98 | 99 | if cleanup is not None: |
|
99 | 100 | if filename and vfs: |
|
100 | 101 | vfs.unlink(cleanup) |
|
101 | 102 | else: |
|
102 | 103 | os.unlink(cleanup) |
|
103 | 104 | |
|
104 | 105 | |
|
105 | 106 | class cg1unpacker(object): |
|
106 | 107 | """Unpacker for cg1 changegroup streams. |
|
107 | 108 | |
|
108 | 109 | A changegroup unpacker handles the framing of the revision data in |
|
109 | 110 | the wire format. Most consumers will want to use the apply() |
|
110 | 111 | method to add the changes from the changegroup to a repository. |
|
111 | 112 | |
|
112 | 113 | If you're forwarding a changegroup unmodified to another consumer, |
|
113 | 114 | use getchunks(), which returns an iterator of changegroup |
|
114 | 115 | chunks. This is mostly useful for cases where you need to know the |
|
115 | 116 | data stream has ended by observing the end of the changegroup. |
|
116 | 117 | |
|
117 | 118 | deltachunk() is useful only if you're applying delta data. Most |
|
118 | 119 | consumers should prefer apply() instead. |
|
119 | 120 | |
|
120 | 121 | A few other public methods exist. Those are used only for |
|
121 | 122 | bundlerepo and some debug commands - their use is discouraged. |
|
122 | 123 | """ |
|
123 | 124 | |
|
124 | 125 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
125 | 126 | deltaheadersize = deltaheader.size |
|
126 | 127 | version = b'01' |
|
127 | 128 | _grouplistcount = 1 # One list of files after the manifests |
|
128 | 129 | |
|
129 | 130 | def __init__(self, fh, alg, extras=None): |
|
130 | 131 | if alg is None: |
|
131 | 132 | alg = b'UN' |
|
132 | 133 | if alg not in util.compengines.supportedbundletypes: |
|
133 | 134 | raise error.Abort(_(b'unknown stream compression type: %s') % alg) |
|
134 | 135 | if alg == b'BZ': |
|
135 | 136 | alg = b'_truncatedBZ' |
|
136 | 137 | |
|
137 | 138 | compengine = util.compengines.forbundletype(alg) |
|
138 | 139 | self._stream = compengine.decompressorreader(fh) |
|
139 | 140 | self._type = alg |
|
140 | 141 | self.extras = extras or {} |
|
141 | 142 | self.callback = None |
|
142 | 143 | |
|
143 | 144 | # These methods (compressed, read, seek, tell) all appear to only |
|
144 | 145 | # be used by bundlerepo, but it's a little hard to tell. |
|
145 | 146 | def compressed(self): |
|
146 | 147 | return self._type is not None and self._type != b'UN' |
|
147 | 148 | |
|
148 | 149 | def read(self, l): |
|
149 | 150 | return self._stream.read(l) |
|
150 | 151 | |
|
151 | 152 | def seek(self, pos): |
|
152 | 153 | return self._stream.seek(pos) |
|
153 | 154 | |
|
154 | 155 | def tell(self): |
|
155 | 156 | return self._stream.tell() |
|
156 | 157 | |
|
157 | 158 | def close(self): |
|
158 | 159 | return self._stream.close() |
|
159 | 160 | |
|
160 | 161 | def _chunklength(self): |
|
161 | 162 | d = readexactly(self._stream, 4) |
|
162 | 163 | l = struct.unpack(b">l", d)[0] |
|
163 | 164 | if l <= 4: |
|
164 | 165 | if l: |
|
165 | 166 | raise error.Abort(_(b"invalid chunk length %d") % l) |
|
166 | 167 | return 0 |
|
167 | 168 | if self.callback: |
|
168 | 169 | self.callback() |
|
169 | 170 | return l - 4 |
|
170 | 171 | |
|
171 | 172 | def changelogheader(self): |
|
172 | 173 | """v10 does not have a changelog header chunk""" |
|
173 | 174 | return {} |
|
174 | 175 | |
|
175 | 176 | def manifestheader(self): |
|
176 | 177 | """v10 does not have a manifest header chunk""" |
|
177 | 178 | return {} |
|
178 | 179 | |
|
179 | 180 | def filelogheader(self): |
|
180 | 181 | """return the header of the filelogs chunk, v10 only has the filename""" |
|
181 | 182 | l = self._chunklength() |
|
182 | 183 | if not l: |
|
183 | 184 | return {} |
|
184 | 185 | fname = readexactly(self._stream, l) |
|
185 | 186 | return {b'filename': fname} |
|
186 | 187 | |
|
187 | 188 | def _deltaheader(self, headertuple, prevnode): |
|
188 | 189 | node, p1, p2, cs = headertuple |
|
189 | 190 | if prevnode is None: |
|
190 | 191 | deltabase = p1 |
|
191 | 192 | else: |
|
192 | 193 | deltabase = prevnode |
|
193 | 194 | flags = 0 |
|
194 | 195 | return node, p1, p2, deltabase, cs, flags |
|
195 | 196 | |
|
196 | 197 | def deltachunk(self, prevnode): |
|
197 | 198 | l = self._chunklength() |
|
198 | 199 | if not l: |
|
199 | 200 | return {} |
|
200 | 201 | headerdata = readexactly(self._stream, self.deltaheadersize) |
|
201 | 202 | header = self.deltaheader.unpack(headerdata) |
|
202 | 203 | delta = readexactly(self._stream, l - self.deltaheadersize) |
|
203 | 204 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) |
|
204 | 205 | return (node, p1, p2, cs, deltabase, delta, flags) |
|
205 | 206 | |
|
206 | 207 | def getchunks(self): |
|
207 | 208 | """returns all the chunks contains in the bundle |
|
208 | 209 | |
|
209 | 210 | Used when you need to forward the binary stream to a file or another |
|
210 | 211 | network API. To do so, it parse the changegroup data, otherwise it will |
|
211 | 212 | block in case of sshrepo because it don't know the end of the stream. |
|
212 | 213 | """ |
|
213 | 214 | # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog, |
|
214 | 215 | # and a list of filelogs. For changegroup 3, we expect 4 parts: |
|
215 | 216 | # changelog, manifestlog, a list of tree manifestlogs, and a list of |
|
216 | 217 | # filelogs. |
|
217 | 218 | # |
|
218 | 219 | # Changelog and manifestlog parts are terminated with empty chunks. The |
|
219 | 220 | # tree and file parts are a list of entry sections. Each entry section |
|
220 | 221 | # is a series of chunks terminating in an empty chunk. The list of these |
|
221 | 222 | # entry sections is terminated in yet another empty chunk, so we know |
|
222 | 223 | # we've reached the end of the tree/file list when we reach an empty |
|
223 | 224 | # chunk that was proceeded by no non-empty chunks. |
|
224 | 225 | |
|
225 | 226 | parts = 0 |
|
226 | 227 | while parts < 2 + self._grouplistcount: |
|
227 | 228 | noentries = True |
|
228 | 229 | while True: |
|
229 | 230 | chunk = getchunk(self) |
|
230 | 231 | if not chunk: |
|
231 | 232 | # The first two empty chunks represent the end of the |
|
232 | 233 | # changelog and the manifestlog portions. The remaining |
|
233 | 234 | # empty chunks represent either A) the end of individual |
|
234 | 235 | # tree or file entries in the file list, or B) the end of |
|
235 | 236 | # the entire list. It's the end of the entire list if there |
|
236 | 237 | # were no entries (i.e. noentries is True). |
|
237 | 238 | if parts < 2: |
|
238 | 239 | parts += 1 |
|
239 | 240 | elif noentries: |
|
240 | 241 | parts += 1 |
|
241 | 242 | break |
|
242 | 243 | noentries = False |
|
243 | 244 | yield chunkheader(len(chunk)) |
|
244 | 245 | pos = 0 |
|
245 | 246 | while pos < len(chunk): |
|
246 | 247 | next = pos + 2 ** 20 |
|
247 | 248 | yield chunk[pos:next] |
|
248 | 249 | pos = next |
|
249 | 250 | yield closechunk() |
|
250 | 251 | |
|
251 | 252 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
252 | 253 | self.callback = prog.increment |
|
253 | 254 | # no need to check for empty manifest group here: |
|
254 | 255 | # if the result of the merge of 1 and 2 is the same in 3 and 4, |
|
255 | 256 | # no new manifest will be created and the manifest group will |
|
256 | 257 | # be empty during the pull |
|
257 | 258 | self.manifestheader() |
|
258 | 259 | deltas = self.deltaiter() |
|
259 | 260 | repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp) |
|
260 | 261 | prog.complete() |
|
261 | 262 | self.callback = None |
|
262 | 263 | |
|
263 | 264 | def apply( |
|
264 | 265 | self, |
|
265 | 266 | repo, |
|
266 | 267 | tr, |
|
267 | 268 | srctype, |
|
268 | 269 | url, |
|
269 | 270 | targetphase=phases.draft, |
|
270 | 271 | expectedtotal=None, |
|
271 | 272 | ): |
|
272 | 273 | """Add the changegroup returned by source.read() to this repo. |
|
273 | 274 | srctype is a string like 'push', 'pull', or 'unbundle'. url is |
|
274 | 275 | the URL of the repo where this changegroup is coming from. |
|
275 | 276 | |
|
276 | 277 | Return an integer summarizing the change to this repo: |
|
277 | 278 | - nothing changed or no source: 0 |
|
278 | 279 | - more heads than before: 1+added heads (2..n) |
|
279 | 280 | - fewer heads than before: -1-removed heads (-2..-n) |
|
280 | 281 | - number of heads stays the same: 1 |
|
281 | 282 | """ |
|
282 | 283 | repo = repo.unfiltered() |
|
283 | 284 | |
|
284 | 285 | def csmap(x): |
|
285 | 286 | repo.ui.debug(b"add changeset %s\n" % short(x)) |
|
286 | 287 | return len(cl) |
|
287 | 288 | |
|
288 | 289 | def revmap(x): |
|
289 | 290 | return cl.rev(x) |
|
290 | 291 | |
|
291 | 292 | try: |
|
292 | 293 | # The transaction may already carry source information. In this |
|
293 | 294 | # case we use the top level data. We overwrite the argument |
|
294 | 295 | # because we need to use the top level value (if they exist) |
|
295 | 296 | # in this function. |
|
296 | 297 | srctype = tr.hookargs.setdefault(b'source', srctype) |
|
297 | 298 | tr.hookargs.setdefault(b'url', url) |
|
298 | 299 | repo.hook( |
|
299 | 300 | b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs) |
|
300 | 301 | ) |
|
301 | 302 | |
|
302 | 303 | # write changelog data to temp files so concurrent readers |
|
303 | 304 | # will not see an inconsistent view |
|
304 | 305 | cl = repo.changelog |
|
305 | 306 | cl.delayupdate(tr) |
|
306 | 307 | oldheads = set(cl.heads()) |
|
307 | 308 | |
|
308 | 309 | trp = weakref.proxy(tr) |
|
309 | 310 | # pull off the changeset group |
|
310 | 311 | repo.ui.status(_(b"adding changesets\n")) |
|
311 | 312 | clstart = len(cl) |
|
312 | 313 | progress = repo.ui.makeprogress( |
|
313 | 314 | _(b'changesets'), unit=_(b'chunks'), total=expectedtotal |
|
314 | 315 | ) |
|
315 | 316 | self.callback = progress.increment |
|
316 | 317 | |
|
317 | 318 | efilesset = set() |
|
318 | 319 | |
|
319 | 320 | def onchangelog(cl, node): |
|
320 | 321 | efilesset.update(cl.readfiles(node)) |
|
321 | 322 | |
|
322 | 323 | self.changelogheader() |
|
323 | 324 | deltas = self.deltaiter() |
|
324 | 325 | cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog) |
|
325 | 326 | efiles = len(efilesset) |
|
326 | 327 | |
|
327 | 328 | if not cgnodes: |
|
328 | 329 | repo.ui.develwarn( |
|
329 | 330 | b'applied empty changelog from changegroup', |
|
330 | 331 | config=b'warn-empty-changegroup', |
|
331 | 332 | ) |
|
332 | 333 | clend = len(cl) |
|
333 | 334 | changesets = clend - clstart |
|
334 | 335 | progress.complete() |
|
335 | 336 | self.callback = None |
|
336 | 337 | |
|
337 | 338 | # pull off the manifest group |
|
338 | 339 | repo.ui.status(_(b"adding manifests\n")) |
|
339 | 340 | # We know that we'll never have more manifests than we had |
|
340 | 341 | # changesets. |
|
341 | 342 | progress = repo.ui.makeprogress( |
|
342 | 343 | _(b'manifests'), unit=_(b'chunks'), total=changesets |
|
343 | 344 | ) |
|
344 | 345 | self._unpackmanifests(repo, revmap, trp, progress) |
|
345 | 346 | |
|
346 | 347 | needfiles = {} |
|
347 | 348 | if repo.ui.configbool(b'server', b'validate'): |
|
348 | 349 | cl = repo.changelog |
|
349 | 350 | ml = repo.manifestlog |
|
350 | 351 | # validate incoming csets have their manifests |
|
351 | 352 | for cset in pycompat.xrange(clstart, clend): |
|
352 | 353 | mfnode = cl.changelogrevision(cset).manifest |
|
353 | 354 | mfest = ml[mfnode].readdelta() |
|
354 | 355 | # store file cgnodes we must see |
|
355 | 356 | for f, n in pycompat.iteritems(mfest): |
|
356 | 357 | needfiles.setdefault(f, set()).add(n) |
|
357 | 358 | |
|
358 | 359 | # process the files |
|
359 | 360 | repo.ui.status(_(b"adding file changes\n")) |
|
360 | 361 | newrevs, newfiles = _addchangegroupfiles( |
|
361 | 362 | repo, self, revmap, trp, efiles, needfiles |
|
362 | 363 | ) |
|
363 | 364 | |
|
364 | 365 | # making sure the value exists |
|
365 | 366 | tr.changes.setdefault(b'changegroup-count-changesets', 0) |
|
366 | 367 | tr.changes.setdefault(b'changegroup-count-revisions', 0) |
|
367 | 368 | tr.changes.setdefault(b'changegroup-count-files', 0) |
|
368 | 369 | tr.changes.setdefault(b'changegroup-count-heads', 0) |
|
369 | 370 | |
|
370 | 371 | # some code use bundle operation for internal purpose. They usually |
|
371 | 372 | # set `ui.quiet` to do this outside of user sight. Size the report |
|
372 | 373 | # of such operation now happens at the end of the transaction, that |
|
373 | 374 | # ui.quiet has not direct effect on the output. |
|
374 | 375 | # |
|
375 | 376 | # To preserve this intend use an inelegant hack, we fail to report |
|
376 | 377 | # the change if `quiet` is set. We should probably move to |
|
377 | 378 | # something better, but this is a good first step to allow the "end |
|
378 | 379 | # of transaction report" to pass tests. |
|
379 | 380 | if not repo.ui.quiet: |
|
380 | 381 | tr.changes[b'changegroup-count-changesets'] += changesets |
|
381 | 382 | tr.changes[b'changegroup-count-revisions'] += newrevs |
|
382 | 383 | tr.changes[b'changegroup-count-files'] += newfiles |
|
383 | 384 | |
|
384 | 385 | deltaheads = 0 |
|
385 | 386 | if oldheads: |
|
386 | 387 | heads = cl.heads() |
|
387 | 388 | deltaheads += len(heads) - len(oldheads) |
|
388 | 389 | for h in heads: |
|
389 | 390 | if h not in oldheads and repo[h].closesbranch(): |
|
390 | 391 | deltaheads -= 1 |
|
391 | 392 | |
|
392 | 393 | # see previous comment about checking ui.quiet |
|
393 | 394 | if not repo.ui.quiet: |
|
394 | 395 | tr.changes[b'changegroup-count-heads'] += deltaheads |
|
395 | 396 | repo.invalidatevolatilesets() |
|
396 | 397 | |
|
397 | 398 | if changesets > 0: |
|
398 | 399 | if b'node' not in tr.hookargs: |
|
399 | 400 | tr.hookargs[b'node'] = hex(cl.node(clstart)) |
|
400 | 401 | tr.hookargs[b'node_last'] = hex(cl.node(clend - 1)) |
|
401 | 402 | hookargs = dict(tr.hookargs) |
|
402 | 403 | else: |
|
403 | 404 | hookargs = dict(tr.hookargs) |
|
404 | 405 | hookargs[b'node'] = hex(cl.node(clstart)) |
|
405 | 406 | hookargs[b'node_last'] = hex(cl.node(clend - 1)) |
|
406 | 407 | repo.hook( |
|
407 | 408 | b'pretxnchangegroup', |
|
408 | 409 | throw=True, |
|
409 | 410 | **pycompat.strkwargs(hookargs) |
|
410 | 411 | ) |
|
411 | 412 | |
|
412 | 413 | added = [cl.node(r) for r in pycompat.xrange(clstart, clend)] |
|
413 | 414 | phaseall = None |
|
414 | 415 | if srctype in (b'push', b'serve'): |
|
415 | 416 | # Old servers can not push the boundary themselves. |
|
416 | 417 | # New servers won't push the boundary if changeset already |
|
417 | 418 | # exists locally as secret |
|
418 | 419 | # |
|
419 | 420 | # We should not use added here but the list of all change in |
|
420 | 421 | # the bundle |
|
421 | 422 | if repo.publishing(): |
|
422 | 423 | targetphase = phaseall = phases.public |
|
423 | 424 | else: |
|
424 | 425 | # closer target phase computation |
|
425 | 426 | |
|
426 | 427 | # Those changesets have been pushed from the |
|
427 | 428 | # outside, their phases are going to be pushed |
|
428 | 429 | # alongside. Therefor `targetphase` is |
|
429 | 430 | # ignored. |
|
430 | 431 | targetphase = phaseall = phases.draft |
|
431 | 432 | if added: |
|
432 | 433 | phases.registernew(repo, tr, targetphase, added) |
|
433 | 434 | if phaseall is not None: |
|
434 | 435 | phases.advanceboundary(repo, tr, phaseall, cgnodes) |
|
435 | 436 | |
|
436 | 437 | if changesets > 0: |
|
437 | 438 | |
|
438 | 439 | def runhooks(unused_success): |
|
439 | 440 | # These hooks run when the lock releases, not when the |
|
440 | 441 | # transaction closes. So it's possible for the changelog |
|
441 | 442 | # to have changed since we last saw it. |
|
442 | 443 | if clstart >= len(repo): |
|
443 | 444 | return |
|
444 | 445 | |
|
445 | 446 | repo.hook(b"changegroup", **pycompat.strkwargs(hookargs)) |
|
446 | 447 | |
|
447 | 448 | for n in added: |
|
448 | 449 | args = hookargs.copy() |
|
449 | 450 | args[b'node'] = hex(n) |
|
450 | 451 | del args[b'node_last'] |
|
451 | 452 | repo.hook(b"incoming", **pycompat.strkwargs(args)) |
|
452 | 453 | |
|
453 | 454 | newheads = [h for h in repo.heads() if h not in oldheads] |
|
454 | 455 | repo.ui.log( |
|
455 | 456 | b"incoming", |
|
456 | 457 | b"%d incoming changes - new heads: %s\n", |
|
457 | 458 | len(added), |
|
458 | 459 | b', '.join([hex(c[:6]) for c in newheads]), |
|
459 | 460 | ) |
|
460 | 461 | |
|
461 | 462 | tr.addpostclose( |
|
462 | 463 | b'changegroup-runhooks-%020i' % clstart, |
|
463 | 464 | lambda tr: repo._afterlock(runhooks), |
|
464 | 465 | ) |
|
465 | 466 | finally: |
|
466 | 467 | repo.ui.flush() |
|
467 | 468 | # never return 0 here: |
|
468 | 469 | if deltaheads < 0: |
|
469 | 470 | ret = deltaheads - 1 |
|
470 | 471 | else: |
|
471 | 472 | ret = deltaheads + 1 |
|
472 | 473 | return ret |
|
473 | 474 | |
|
474 | 475 | def deltaiter(self): |
|
475 | 476 | """ |
|
476 | 477 | returns an iterator of the deltas in this changegroup |
|
477 | 478 | |
|
478 | 479 | Useful for passing to the underlying storage system to be stored. |
|
479 | 480 | """ |
|
480 | 481 | chain = None |
|
481 | 482 | for chunkdata in iter(lambda: self.deltachunk(chain), {}): |
|
482 | 483 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags) |
|
483 | 484 | yield chunkdata |
|
484 | 485 | chain = chunkdata[0] |
|
485 | 486 | |
|
486 | 487 | |
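A minimal sketch of how a caller typically drains deltaiter() — `store`, `linkmapper`, and `tr` are assumed stand-ins for a revlog-style storage object, a linkrev mapper, and a transaction, mirroring the addgroup() calls made elsewhere in this file:

    def apply_deltas(unpacker, store, linkmapper, tr):
        # deltaiter() yields (node, p1, p2, cs, deltabase, delta, flags)
        # tuples; addgroup() stores them and reports whether anything
        # was actually added.
        deltas = unpacker.deltaiter()
        if not store.addgroup(deltas, linkmapper, tr):
            raise RuntimeError('received revlog group is empty')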
|
487 | 488 | class cg2unpacker(cg1unpacker): |
|
488 | 489 | """Unpacker for cg2 streams. |
|
489 | 490 | |
|
490 | 491 | cg2 streams add support for generaldelta, so the delta header |
|
491 | 492 | format is slightly different. All other features about the data |
|
492 | 493 | remain the same. |
|
493 | 494 | """ |
|
494 | 495 | |
|
495 | 496 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER |
|
496 | 497 | deltaheadersize = deltaheader.size |
|
497 | 498 | version = b'02' |
|
498 | 499 | |
|
499 | 500 | def _deltaheader(self, headertuple, prevnode): |
|
500 | 501 | node, p1, p2, deltabase, cs = headertuple |
|
501 | 502 | flags = 0 |
|
502 | 503 | return node, p1, p2, deltabase, cs, flags |
|
503 | 504 | |
|
504 | 505 | |
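An illustrative stand-alone decoder for the cg2 delta header, assuming _CHANGEGROUPV2_DELTA_HEADER is the five-node struct implied by _deltaheader() above:

    import struct

    # node, p1, p2, deltabase, linknode: five 20-byte (sha1) ids
    _CG2_HEADER = struct.Struct(b'>20s20s20s20s20s')

    def parse_cg2_header(data):
        node, p1, p2, deltabase, cs = _CG2_HEADER.unpack(data[:_CG2_HEADER.size])
        return node, p1, p2, deltabase, cs, 0  # cg2 carries no per-revision flags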
|
505 | 506 | class cg3unpacker(cg2unpacker): |
|
506 | 507 | """Unpacker for cg3 streams. |
|
507 | 508 | |
|
508 | 509 | cg3 streams add support for exchanging treemanifests and revlog |
|
509 | 510 | flags. It adds the revlog flags to the delta header and an empty chunk |
|
510 | 511 | separating manifests and files. |
|
511 | 512 | """ |
|
512 | 513 | |
|
513 | 514 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER |
|
514 | 515 | deltaheadersize = deltaheader.size |
|
515 | 516 | version = b'03' |
|
516 | 517 | _grouplistcount = 2 # One list of manifests and one list of files |
|
517 | 518 | |
|
518 | 519 | def _deltaheader(self, headertuple, prevnode): |
|
519 | 520 | node, p1, p2, deltabase, cs, flags = headertuple |
|
520 | 521 | return node, p1, p2, deltabase, cs, flags |
|
521 | 522 | |
|
522 | 523 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
523 | 524 | super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog) |
|
524 | 525 | for chunkdata in iter(self.filelogheader, {}): |
|
525 | 526 | # If we get here, there are directory manifests in the changegroup |
|
526 | 527 | d = chunkdata[b"filename"] |
|
527 | 528 | repo.ui.debug(b"adding %s revisions\n" % d) |
|
528 | 529 | deltas = self.deltaiter() |
|
529 | 530 | if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp): |
|
530 | 531 | raise error.Abort(_(b"received dir revlog group is empty")) |
|
531 | 532 | |
|
532 | 533 | |
|
533 | 534 | class headerlessfixup(object): |
|
534 | 535 | def __init__(self, fh, h): |
|
535 | 536 | self._h = h |
|
536 | 537 | self._fh = fh |
|
537 | 538 | |
|
538 | 539 | def read(self, n): |
|
539 | 540 | if self._h: |
|
540 | 541 | d, self._h = self._h[:n], self._h[n:] |
|
541 | 542 | if len(d) < n: |
|
542 | 543 | d += readexactly(self._fh, n - len(d)) |
|
543 | 544 | return d |
|
544 | 545 | return readexactly(self._fh, n) |
|
545 | 546 | |
|
546 | 547 | |
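headerlessfixup exists so a caller that has already sniffed a stream's magic bytes can hand the stream onward as if untouched; a hypothetical helper showing the pattern (`fh` is any file-like object):

    def reattachheader(fh):
        # Peek the 4-byte magic, then wrap the stream so downstream
        # parsers still read from the very beginning.
        magic = fh.read(4)
        return magic, headerlessfixup(fh, magic)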
|
547 | 548 | def _revisiondeltatochunks(delta, headerfn): |
|
548 | 549 | """Serialize a revisiondelta to changegroup chunks.""" |
|
549 | 550 | |
|
550 | 551 | # The captured revision delta may be encoded as a delta against |
|
551 | 552 | # a base revision or as a full revision. The changegroup format |
|
552 | 553 | # requires that everything on the wire be deltas. So for full |
|
553 | 554 | # revisions, we need to invent a header that says to rewrite |
|
554 | 555 | # data. |
|
555 | 556 | |
|
556 | 557 | if delta.delta is not None: |
|
557 | 558 | prefix, data = b'', delta.delta |
|
558 | 559 | elif delta.basenode == nullid: |
|
559 | 560 | data = delta.revision |
|
560 | 561 | prefix = mdiff.trivialdiffheader(len(data)) |
|
561 | 562 | else: |
|
562 | 563 | data = delta.revision |
|
563 | 564 | prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data)) |
|
564 | 565 | |
|
565 | 566 | meta = headerfn(delta) |
|
566 | 567 | |
|
567 | 568 | yield chunkheader(len(meta) + len(prefix) + len(data)) |
|
568 | 569 | yield meta |
|
569 | 570 | if prefix: |
|
570 | 571 | yield prefix |
|
571 | 572 | yield data |
|
572 | 573 | |
|
573 | 574 | |
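For reference, the framing assumed above is a 4-byte big-endian length that counts itself, with a zero-length chunk closing each group — a sketch of the two helpers, assuming they match the chunkheader()/closechunk() used throughout this file:

    import struct

    def chunkheader(datalen):
        # Length prefix for one chunk; the 4 bytes of the prefix are
        # included in the advertised length.
        return struct.pack(b'>l', datalen + 4)

    def closechunk():
        # A zero-length chunk terminates a group of chunks.
        return struct.pack(b'>l', 0)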
|
574 | 575 | def _sortnodesellipsis(store, nodes, cl, lookup): |
|
575 | 576 | """Sort nodes for changegroup generation.""" |
|
576 | 577 | # Ellipses serving mode. |
|
577 | 578 | # |
|
578 | 579 | # In a perfect world, we'd generate better ellipsis-ified graphs |
|
579 | 580 | # for non-changelog revlogs. In practice, we haven't started doing |
|
580 | 581 | # that yet, so the resulting DAGs for the manifestlog and filelogs |
|
581 | 582 | # are actually full of bogus parentage on all the ellipsis |
|
582 | 583 | # nodes. This has the side effect that, while the contents are |
|
583 | 584 | # correct, the individual DAGs might be completely out of whack in |
|
584 | 585 | # a case like 882681bc3166 and its ancestors (back about 10 |
|
585 | 586 | # revisions or so) in the main hg repo. |
|
586 | 587 | # |
|
587 | 588 | # The one invariant we *know* holds is that the new (potentially |
|
588 | 589 | # bogus) DAG shape will be valid if we order the nodes in the |
|
589 | 590 | # order that they're introduced in dramatis personae by the |
|
590 | 591 | # changelog, so what we do is we sort the non-changelog histories |
|
591 | 592 | # by the order in which they are used by the changelog. |
|
592 | 593 | key = lambda n: cl.rev(lookup(n)) |
|
593 | 594 | return sorted(nodes, key=key) |
|
594 | 595 | |
|
595 | 596 | |
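A toy illustration of that invariant — nodes come out in the order their linked changesets appear in the changelog (the revnums below are made up):

    nodes = [b'n-late', b'n-early', b'n-mid']
    linkrev = {b'n-early': 5, b'n-mid': 7, b'n-late': 9}.get
    print(sorted(nodes, key=linkrev))  # [b'n-early', b'n-mid', b'n-late']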
|
596 | 597 | def _resolvenarrowrevisioninfo( |
|
597 | 598 | cl, |
|
598 | 599 | store, |
|
599 | 600 | ischangelog, |
|
600 | 601 | rev, |
|
601 | 602 | linkrev, |
|
602 | 603 | linknode, |
|
603 | 604 | clrevtolocalrev, |
|
604 | 605 | fullclnodes, |
|
605 | 606 | precomputedellipsis, |
|
606 | 607 | ): |
|
607 | 608 | linkparents = precomputedellipsis[linkrev] |
|
608 | 609 | |
|
609 | 610 | def local(clrev): |
|
610 | 611 | """Turn a changelog revnum into a local revnum. |
|
611 | 612 | |
|
612 | 613 | The ellipsis dag is stored as revnums on the changelog, |
|
613 | 614 | but when we're producing ellipsis entries for |
|
614 | 615 | non-changelog revlogs, we need to turn those numbers into |
|
615 | 616 | something local. This does that for us, and during the |
|
616 | 617 | changelog sending phase will also expand the stored |
|
617 | 618 | mappings as needed. |
|
618 | 619 | """ |
|
619 | 620 | if clrev == nullrev: |
|
620 | 621 | return nullrev |
|
621 | 622 | |
|
622 | 623 | if ischangelog: |
|
623 | 624 | return clrev |
|
624 | 625 | |
|
625 | 626 | # Walk the ellipsis-ized changelog breadth-first looking for a |
|
626 | 627 | # change that has been linked from the current revlog. |
|
627 | 628 | # |
|
628 | 629 | # For a flat manifest revlog only a single step should be necessary |
|
629 | 630 | # as all relevant changelog entries are relevant to the flat |
|
630 | 631 | # manifest. |
|
631 | 632 | # |
|
632 | 633 | # For a filelog or tree manifest dirlog however not every changelog |
|
633 | 634 | # entry will have been relevant, so we need to skip some changelog |
|
634 | 635 | # nodes even after ellipsis-izing. |
|
635 | 636 | walk = [clrev] |
|
636 | 637 | while walk: |
|
637 | 638 | p = walk[0] |
|
638 | 639 | walk = walk[1:] |
|
639 | 640 | if p in clrevtolocalrev: |
|
640 | 641 | return clrevtolocalrev[p] |
|
641 | 642 | elif p in fullclnodes: |
|
642 | 643 | walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev]) |
|
643 | 644 | elif p in precomputedellipsis: |
|
644 | 645 | walk.extend( |
|
645 | 646 | [pp for pp in precomputedellipsis[p] if pp != nullrev] |
|
646 | 647 | ) |
|
647 | 648 | else: |
|
648 | 649 | # In this case, we've got an ellipsis with parents |
|
649 | 650 | # outside the current bundle (likely an |
|
650 | 651 | # incremental pull). We "know" that we can use the |
|
651 | 652 | # value of this same revlog at whatever revision |
|
652 | 653 | # is pointed to by linknode. "Know" is in scare |
|
653 | 654 | # quotes because I haven't done enough examination |
|
654 | 655 | # of edge cases to convince myself this is really |
|
655 | 656 | # a fact - it works for all the (admittedly |
|
656 | 657 | # thorough) cases in our testsuite, but I would be |
|
657 | 658 | # somewhat unsurprised to find a case in the wild |
|
658 | 659 | # where this breaks down a bit. That said, I don't |
|
659 | 660 | # know if it would hurt anything. |
|
660 | 661 | for i in pycompat.xrange(rev, 0, -1): |
|
661 | 662 | if store.linkrev(i) == clrev: |
|
662 | 663 | return i |
|
663 | 664 | # We failed to resolve a parent for this node, so |
|
664 | 665 | # we crash the changegroup construction. |
|
665 | 666 | raise error.Abort( |
|
666 | 667 | b'unable to resolve parent while packing %r %r' |
|
667 | 668 | b' for changeset %r' % (store.indexfile, rev, clrev) |
|
668 | 669 | ) |
|
669 | 670 | |
|
670 | 671 | return nullrev |
|
671 | 672 | |
|
672 | 673 | if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)): |
|
673 | 674 | p1, p2 = nullrev, nullrev |
|
674 | 675 | elif len(linkparents) == 1: |
|
675 | 676 | (p1,) = sorted(local(p) for p in linkparents) |
|
676 | 677 | p2 = nullrev |
|
677 | 678 | else: |
|
678 | 679 | p1, p2 = sorted(local(p) for p in linkparents) |
|
679 | 680 | |
|
680 | 681 | p1node, p2node = store.node(p1), store.node(p2) |
|
681 | 682 | |
|
682 | 683 | return p1node, p2node, linknode |
|
683 | 684 | |
|
684 | 685 | |
|
685 | 686 | def deltagroup( |
|
686 | 687 | repo, |
|
687 | 688 | store, |
|
688 | 689 | nodes, |
|
689 | 690 | ischangelog, |
|
690 | 691 | lookup, |
|
691 | 692 | forcedeltaparentprev, |
|
692 | 693 | topic=None, |
|
693 | 694 | ellipses=False, |
|
694 | 695 | clrevtolocalrev=None, |
|
695 | 696 | fullclnodes=None, |
|
696 | 697 | precomputedellipsis=None, |
|
697 | 698 | ): |
|
698 | 699 | """Calculate deltas for a set of revisions. |
|
699 | 700 | |
|
700 | 701 | Is a generator of ``revisiondelta`` instances. |
|
701 | 702 | |
|
702 | 703 | If topic is not None, progress detail will be generated using this |
|
703 | 704 | topic name (e.g. changesets, manifests, etc). |
|
704 | 705 | """ |
|
705 | 706 | if not nodes: |
|
706 | 707 | return |
|
707 | 708 | |
|
708 | 709 | cl = repo.changelog |
|
709 | 710 | |
|
710 | 711 | if ischangelog: |
|
711 | 712 | # `hg log` shows changesets in storage order. To preserve order |
|
712 | 713 | # across clones, send out changesets in storage order. |
|
713 | 714 | nodesorder = b'storage' |
|
714 | 715 | elif ellipses: |
|
715 | 716 | nodes = _sortnodesellipsis(store, nodes, cl, lookup) |
|
716 | 717 | nodesorder = b'nodes' |
|
717 | 718 | else: |
|
718 | 719 | nodesorder = None |
|
719 | 720 | |
|
720 | 721 | # Perform ellipses filtering and revision massaging. We do this before |
|
721 | 722 | # emitrevisions() because a) filtering out revisions creates less work |
|
722 | 723 | # for emitrevisions() b) dropping revisions would break emitrevisions()'s |
|
723 | 724 | # assumptions about delta choices and we would possibly send a delta |
|
724 | 725 | # referencing a missing base revision. |
|
725 | 726 | # |
|
726 | 727 | # Also, calling lookup() has side-effects with regards to populating |
|
727 | 728 | # data structures. If we don't call lookup() for each node or if we call |
|
728 | 729 | # lookup() after the first pass through each node, things can break - |
|
729 | 730 | # possibly intermittently depending on the python hash seed! For that |
|
730 | 731 | # reason, we store a mapping of all linknodes during the initial node |
|
731 | 732 | # pass rather than use lookup() on the output side. |
|
732 | 733 | if ellipses: |
|
733 | 734 | filtered = [] |
|
734 | 735 | adjustedparents = {} |
|
735 | 736 | linknodes = {} |
|
736 | 737 | |
|
737 | 738 | for node in nodes: |
|
738 | 739 | rev = store.rev(node) |
|
739 | 740 | linknode = lookup(node) |
|
740 | 741 | linkrev = cl.rev(linknode) |
|
741 | 742 | clrevtolocalrev[linkrev] = rev |
|
742 | 743 | |
|
743 | 744 | # If linknode is in fullclnodes, it means the corresponding |
|
744 | 745 | # changeset was a full changeset and is being sent unaltered. |
|
745 | 746 | if linknode in fullclnodes: |
|
746 | 747 | linknodes[node] = linknode |
|
747 | 748 | |
|
748 | 749 | # If the corresponding changeset wasn't in the set computed |
|
749 | 750 | # as relevant to us, it should be dropped outright. |
|
750 | 751 | elif linkrev not in precomputedellipsis: |
|
751 | 752 | continue |
|
752 | 753 | |
|
753 | 754 | else: |
|
754 | 755 | # We could probably do this later and avoid the dict |
|
755 | 756 | # holding state. But it likely doesn't matter. |
|
756 | 757 | p1node, p2node, linknode = _resolvenarrowrevisioninfo( |
|
757 | 758 | cl, |
|
758 | 759 | store, |
|
759 | 760 | ischangelog, |
|
760 | 761 | rev, |
|
761 | 762 | linkrev, |
|
762 | 763 | linknode, |
|
763 | 764 | clrevtolocalrev, |
|
764 | 765 | fullclnodes, |
|
765 | 766 | precomputedellipsis, |
|
766 | 767 | ) |
|
767 | 768 | |
|
768 | 769 | adjustedparents[node] = (p1node, p2node) |
|
769 | 770 | linknodes[node] = linknode |
|
770 | 771 | |
|
771 | 772 | filtered.append(node) |
|
772 | 773 | |
|
773 | 774 | nodes = filtered |
|
774 | 775 | |
|
775 | 776 | # We expect the first pass to be fast, so we only engage the progress |
|
776 | 777 | # meter for constructing the revision deltas. |
|
777 | 778 | progress = None |
|
778 | 779 | if topic is not None: |
|
779 | 780 | progress = repo.ui.makeprogress( |
|
780 | 781 | topic, unit=_(b'chunks'), total=len(nodes) |
|
781 | 782 | ) |
|
782 | 783 | |
|
783 | 784 | configtarget = repo.ui.config(b'devel', b'bundle.delta') |
|
784 | 785 | if configtarget not in (b'', b'p1', b'full'): |
|
785 | 786 | msg = _("""config "devel.bundle.delta" has an unknown value: %s""")
|
786 | 787 | repo.ui.warn(msg % configtarget) |
|
787 | 788 | |
|
788 | 789 | deltamode = repository.CG_DELTAMODE_STD |
|
789 | 790 | if forcedeltaparentprev: |
|
790 | 791 | deltamode = repository.CG_DELTAMODE_PREV |
|
791 | 792 | elif configtarget == b'p1': |
|
792 | 793 | deltamode = repository.CG_DELTAMODE_P1 |
|
793 | 794 | elif configtarget == b'full': |
|
794 | 795 | deltamode = repository.CG_DELTAMODE_FULL |
|
795 | 796 | |
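# For reference, the developer knob read above would be set like this
# in an hgrc (any value other than '', 'p1' or 'full' triggers the
# warning emitted earlier):
#
#   [devel]
#   bundle.delta = full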
|
796 | 797 | revisions = store.emitrevisions( |
|
797 | 798 | nodes, |
|
798 | 799 | nodesorder=nodesorder, |
|
799 | 800 | revisiondata=True, |
|
800 | 801 | assumehaveparentrevisions=not ellipses, |
|
801 | 802 | deltamode=deltamode, |
|
802 | 803 | ) |
|
803 | 804 | |
|
804 | 805 | for i, revision in enumerate(revisions): |
|
805 | 806 | if progress: |
|
806 | 807 | progress.update(i + 1) |
|
807 | 808 | |
|
808 | 809 | if ellipses: |
|
809 | 810 | linknode = linknodes[revision.node] |
|
810 | 811 | |
|
811 | 812 | if revision.node in adjustedparents: |
|
812 | 813 | p1node, p2node = adjustedparents[revision.node] |
|
813 | 814 | revision.p1node = p1node |
|
814 | 815 | revision.p2node = p2node |
|
815 | 816 | revision.flags |= repository.REVISION_FLAG_ELLIPSIS |
|
816 | 817 | |
|
817 | 818 | else: |
|
818 | 819 | linknode = lookup(revision.node) |
|
819 | 820 | |
|
820 | 821 | revision.linknode = linknode |
|
821 | 822 | yield revision |
|
822 | 823 | |
|
823 | 824 | if progress: |
|
824 | 825 | progress.complete() |
|
825 | 826 | |
|
826 | 827 | |
|
827 | 828 | class cgpacker(object): |
|
828 | 829 | def __init__( |
|
829 | 830 | self, |
|
830 | 831 | repo, |
|
831 | 832 | oldmatcher, |
|
832 | 833 | matcher, |
|
833 | 834 | version, |
|
834 | 835 | builddeltaheader, |
|
835 | 836 | manifestsend, |
|
836 | 837 | forcedeltaparentprev=False, |
|
837 | 838 | bundlecaps=None, |
|
838 | 839 | ellipses=False, |
|
839 | 840 | shallow=False, |
|
840 | 841 | ellipsisroots=None, |
|
841 | 842 | fullnodes=None, |
|
842 | 843 | ): |
|
843 | 844 | """Given a source repo, construct a bundler. |
|
844 | 845 | |
|
845 | 846 | oldmatcher is a matcher that matches on files the client already has. |
|
846 | 847 | These will not be included in the changegroup. |
|
847 | 848 | |
|
848 | 849 | matcher is a matcher that matches on files to include in the |
|
849 | 850 | changegroup. Used to facilitate sparse changegroups. |
|
850 | 851 | |
|
851 | 852 | forcedeltaparentprev indicates whether delta parents must be against |
|
852 | 853 | the previous revision in a delta group. This should only be used for |
|
853 | 854 | compatibility with changegroup version 1. |
|
854 | 855 | |
|
855 | 856 | builddeltaheader is a callable that constructs the header for a group |
|
856 | 857 | delta. |
|
857 | 858 | |
|
858 | 859 | manifestsend is a chunk to send after manifests have been fully emitted. |
|
859 | 860 | |
|
860 | 861 | ellipses indicates whether ellipsis serving mode is enabled. |
|
861 | 862 | |
|
862 | 863 | bundlecaps is optional and can be used to specify the set of |
|
863 | 864 | capabilities which can be used to build the bundle. While bundlecaps is |
|
864 | 865 | unused in core Mercurial, extensions rely on this feature to communicate |
|
865 | 866 | capabilities to customize the changegroup packer. |
|
866 | 867 | |
|
867 | 868 | shallow indicates whether shallow data might be sent. The packer may |
|
868 | 869 | need to pack file contents not introduced by the changes being packed. |
|
869 | 870 | |
|
870 | 871 | fullnodes is the set of changelog nodes which should not be ellipsis |
|
871 | 872 | nodes. We store this rather than the set of nodes that should be |
|
872 | 873 | ellipsis because for very large histories we expect this to be |
|
873 | 874 | significantly smaller. |
|
874 | 875 | """ |
|
875 | 876 | assert oldmatcher |
|
876 | 877 | assert matcher |
|
877 | 878 | self._oldmatcher = oldmatcher |
|
878 | 879 | self._matcher = matcher |
|
879 | 880 | |
|
880 | 881 | self.version = version |
|
881 | 882 | self._forcedeltaparentprev = forcedeltaparentprev |
|
882 | 883 | self._builddeltaheader = builddeltaheader |
|
883 | 884 | self._manifestsend = manifestsend |
|
884 | 885 | self._ellipses = ellipses |
|
885 | 886 | |
|
886 | 887 | # Set of capabilities we can use to build the bundle. |
|
887 | 888 | if bundlecaps is None: |
|
888 | 889 | bundlecaps = set() |
|
889 | 890 | self._bundlecaps = bundlecaps |
|
890 | 891 | self._isshallow = shallow |
|
891 | 892 | self._fullclnodes = fullnodes |
|
892 | 893 | |
|
893 | 894 | # Maps ellipsis revs to their roots at the changelog level. |
|
894 | 895 | self._precomputedellipsis = ellipsisroots |
|
895 | 896 | |
|
896 | 897 | self._repo = repo |
|
897 | 898 | |
|
898 | 899 | if self._repo.ui.verbose and not self._repo.ui.debugflag: |
|
899 | 900 | self._verbosenote = self._repo.ui.note |
|
900 | 901 | else: |
|
901 | 902 | self._verbosenote = lambda s: None |
|
902 | 903 | |
|
903 | 904 | def generate( |
|
904 | 905 | self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True |
|
905 | 906 | ): |
|
906 | 907 | """Yield a sequence of changegroup byte chunks. |
|
907 | 908 | If changelog is False, changelog data won't be added to the changegroup.
|
908 | 909 | """ |
|
909 | 910 | |
|
910 | 911 | repo = self._repo |
|
911 | 912 | cl = repo.changelog |
|
912 | 913 | |
|
913 | 914 | self._verbosenote(_(b'uncompressed size of bundle content:\n')) |
|
914 | 915 | size = 0 |
|
915 | 916 | |
|
916 | 917 | clstate, deltas = self._generatechangelog( |
|
917 | 918 | cl, clnodes, generate=changelog |
|
918 | 919 | ) |
|
919 | 920 | for delta in deltas: |
|
920 | 921 | for chunk in _revisiondeltatochunks(delta, self._builddeltaheader): |
|
921 | 922 | size += len(chunk) |
|
922 | 923 | yield chunk |
|
923 | 924 | |
|
924 | 925 | close = closechunk() |
|
925 | 926 | size += len(close) |
|
926 | 927 | yield close
|
927 | 928 | |
|
928 | 929 | self._verbosenote(_(b'%8.i (changelog)\n') % size) |
|
929 | 930 | |
|
930 | 931 | clrevorder = clstate[b'clrevorder'] |
|
931 | 932 | manifests = clstate[b'manifests'] |
|
932 | 933 | changedfiles = clstate[b'changedfiles'] |
|
933 | 934 | |
|
934 | 935 | # We need to make sure that the linkrev in the changegroup refers to |
|
935 | 936 | # the first changeset that introduced the manifest or file revision. |
|
936 | 937 | # The fastpath is usually safer than the slowpath, because the filelogs |
|
937 | 938 | # are walked in revlog order. |
|
938 | 939 | # |
|
939 | 940 | # When taking the slowpath when the manifest revlog uses generaldelta, |
|
940 | 941 | # the manifest may be walked in the "wrong" order. Without 'clrevorder', |
|
941 | 942 | # we would get an incorrect linkrev (see fix in cc0ff93d0c0c). |
|
942 | 943 | # |
|
943 | 944 | # When taking the fastpath, we are only vulnerable to reordering |
|
944 | 945 | # of the changelog itself. The changelog never uses generaldelta and is |
|
945 | 946 | # never reordered. To handle this case, we simply take the slowpath, |
|
946 | 947 | # which already has the 'clrevorder' logic. This was also fixed in |
|
947 | 948 | # cc0ff93d0c0c. |
|
948 | 949 | |
|
949 | 950 | # Treemanifests don't work correctly with fastpathlinkrev |
|
950 | 951 | # either, because we don't discover which directory nodes to |
|
951 | 952 | # send along with files. This could probably be fixed. |
|
952 | fastpathlinkrev = fastpathlinkrev and ( | |
|
953 | requirements.TREEMANIFEST_REQUIREMENT not in repo.requirements | |
|
954 | ) | |
|
953 | fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo) | |
|
955 | 954 | |
|
956 | 955 | fnodes = {} # needed file nodes |
|
957 | 956 | |
|
958 | 957 | size = 0 |
|
959 | 958 | it = self.generatemanifests( |
|
960 | 959 | commonrevs, |
|
961 | 960 | clrevorder, |
|
962 | 961 | fastpathlinkrev, |
|
963 | 962 | manifests, |
|
964 | 963 | fnodes, |
|
965 | 964 | source, |
|
966 | 965 | clstate[b'clrevtomanifestrev'], |
|
967 | 966 | ) |
|
968 | 967 | |
|
969 | 968 | for tree, deltas in it: |
|
970 | 969 | if tree: |
|
971 | 970 | assert self.version == b'03' |
|
972 | 971 | chunk = _fileheader(tree) |
|
973 | 972 | size += len(chunk) |
|
974 | 973 | yield chunk |
|
975 | 974 | |
|
976 | 975 | for delta in deltas: |
|
977 | 976 | chunks = _revisiondeltatochunks(delta, self._builddeltaheader) |
|
978 | 977 | for chunk in chunks: |
|
979 | 978 | size += len(chunk) |
|
980 | 979 | yield chunk |
|
981 | 980 | |
|
982 | 981 | close = closechunk() |
|
983 | 982 | size += len(close) |
|
984 | 983 | yield close |
|
985 | 984 | |
|
986 | 985 | self._verbosenote(_(b'%8.i (manifests)\n') % size) |
|
987 | 986 | yield self._manifestsend |
|
988 | 987 | |
|
989 | 988 | mfdicts = None |
|
990 | 989 | if self._ellipses and self._isshallow: |
|
991 | 990 | mfdicts = [ |
|
992 | 991 | (self._repo.manifestlog[n].read(), lr) |
|
993 | 992 | for (n, lr) in pycompat.iteritems(manifests) |
|
994 | 993 | ] |
|
995 | 994 | |
|
996 | 995 | manifests.clear() |
|
997 | 996 | clrevs = {cl.rev(x) for x in clnodes} |
|
998 | 997 | |
|
999 | 998 | it = self.generatefiles( |
|
1000 | 999 | changedfiles, |
|
1001 | 1000 | commonrevs, |
|
1002 | 1001 | source, |
|
1003 | 1002 | mfdicts, |
|
1004 | 1003 | fastpathlinkrev, |
|
1005 | 1004 | fnodes, |
|
1006 | 1005 | clrevs, |
|
1007 | 1006 | ) |
|
1008 | 1007 | |
|
1009 | 1008 | for path, deltas in it: |
|
1010 | 1009 | h = _fileheader(path) |
|
1011 | 1010 | size = len(h) |
|
1012 | 1011 | yield h |
|
1013 | 1012 | |
|
1014 | 1013 | for delta in deltas: |
|
1015 | 1014 | chunks = _revisiondeltatochunks(delta, self._builddeltaheader) |
|
1016 | 1015 | for chunk in chunks: |
|
1017 | 1016 | size += len(chunk) |
|
1018 | 1017 | yield chunk |
|
1019 | 1018 | |
|
1020 | 1019 | close = closechunk() |
|
1021 | 1020 | size += len(close) |
|
1022 | 1021 | yield close |
|
1023 | 1022 | |
|
1024 | 1023 | self._verbosenote(_(b'%8.i %s\n') % (size, path)) |
|
1025 | 1024 | |
|
1026 | 1025 | yield closechunk() |
|
1027 | 1026 | |
|
1028 | 1027 | if clnodes: |
|
1029 | 1028 | repo.hook(b'outgoing', node=hex(clnodes[0]), source=source) |
|
1030 | 1029 | |
|
1031 | 1030 | def _generatechangelog(self, cl, nodes, generate=True): |
|
1032 | 1031 | """Generate data for changelog chunks. |
|
1033 | 1032 | |
|
1034 | 1033 | Returns a 2-tuple of a dict containing state and an iterable of |
|
1035 | 1034 | byte chunks. The state will not be fully populated until the |
|
1036 | 1035 | chunk stream has been fully consumed. |
|
1037 | 1036 | |
|
1038 | 1037 | if generate is False, the state will be fully populated and no chunk |
|
1039 | 1038 | stream will be yielded |
|
1040 | 1039 | """ |
|
1041 | 1040 | clrevorder = {} |
|
1042 | 1041 | manifests = {} |
|
1043 | 1042 | mfl = self._repo.manifestlog |
|
1044 | 1043 | changedfiles = set() |
|
1045 | 1044 | clrevtomanifestrev = {} |
|
1046 | 1045 | |
|
1047 | 1046 | state = { |
|
1048 | 1047 | b'clrevorder': clrevorder, |
|
1049 | 1048 | b'manifests': manifests, |
|
1050 | 1049 | b'changedfiles': changedfiles, |
|
1051 | 1050 | b'clrevtomanifestrev': clrevtomanifestrev, |
|
1052 | 1051 | } |
|
1053 | 1052 | |
|
1054 | 1053 | if not (generate or self._ellipses): |
|
1055 | 1054 | # sort the nodes in storage order |
|
1056 | 1055 | nodes = sorted(nodes, key=cl.rev) |
|
1057 | 1056 | for node in nodes: |
|
1058 | 1057 | c = cl.changelogrevision(node) |
|
1059 | 1058 | clrevorder[node] = len(clrevorder) |
|
1060 | 1059 | # record the first changeset introducing this manifest version |
|
1061 | 1060 | manifests.setdefault(c.manifest, node) |
|
1062 | 1061 | # Record a complete list of potentially-changed files in |
|
1063 | 1062 | # this manifest. |
|
1064 | 1063 | changedfiles.update(c.files) |
|
1065 | 1064 | |
|
1066 | 1065 | return state, () |
|
1067 | 1066 | |
|
1068 | 1067 | # Callback for the changelog, used to collect changed files and |
|
1069 | 1068 | # manifest nodes. |
|
1070 | 1069 | # Returns the linkrev node (identity in the changelog case). |
|
1071 | 1070 | def lookupcl(x): |
|
1072 | 1071 | c = cl.changelogrevision(x) |
|
1073 | 1072 | clrevorder[x] = len(clrevorder) |
|
1074 | 1073 | |
|
1075 | 1074 | if self._ellipses: |
|
1076 | 1075 | # Only update manifests if x is going to be sent. Otherwise we |
|
1077 | 1076 | # end up with bogus linkrevs specified for manifests and |
|
1078 | 1077 | # we skip some manifest nodes that we should otherwise |
|
1079 | 1078 | # have sent. |
|
1080 | 1079 | if ( |
|
1081 | 1080 | x in self._fullclnodes |
|
1082 | 1081 | or cl.rev(x) in self._precomputedellipsis |
|
1083 | 1082 | ): |
|
1084 | 1083 | |
|
1085 | 1084 | manifestnode = c.manifest |
|
1086 | 1085 | # Record the first changeset introducing this manifest |
|
1087 | 1086 | # version. |
|
1088 | 1087 | manifests.setdefault(manifestnode, x) |
|
1089 | 1088 | # Set this narrow-specific dict so we have the lowest |
|
1090 | 1089 | # manifest revnum to look up for this cl revnum. (Part of |
|
1091 | 1090 | # mapping changelog ellipsis parents to manifest ellipsis |
|
1092 | 1091 | # parents) |
|
1093 | 1092 | clrevtomanifestrev.setdefault( |
|
1094 | 1093 | cl.rev(x), mfl.rev(manifestnode) |
|
1095 | 1094 | ) |
|
1096 | 1095 | # We can't trust the changed files list in the changeset if the |
|
1097 | 1096 | # client requested a shallow clone. |
|
1098 | 1097 | if self._isshallow: |
|
1099 | 1098 | changedfiles.update(mfl[c.manifest].read().keys()) |
|
1100 | 1099 | else: |
|
1101 | 1100 | changedfiles.update(c.files) |
|
1102 | 1101 | else: |
|
1103 | 1102 | # record the first changeset introducing this manifest version |
|
1104 | 1103 | manifests.setdefault(c.manifest, x) |
|
1105 | 1104 | # Record a complete list of potentially-changed files in |
|
1106 | 1105 | # this manifest. |
|
1107 | 1106 | changedfiles.update(c.files) |
|
1108 | 1107 | |
|
1109 | 1108 | return x |
|
1110 | 1109 | |
|
1111 | 1110 | gen = deltagroup( |
|
1112 | 1111 | self._repo, |
|
1113 | 1112 | cl, |
|
1114 | 1113 | nodes, |
|
1115 | 1114 | True, |
|
1116 | 1115 | lookupcl, |
|
1117 | 1116 | self._forcedeltaparentprev, |
|
1118 | 1117 | ellipses=self._ellipses, |
|
1119 | 1118 | topic=_(b'changesets'), |
|
1120 | 1119 | clrevtolocalrev={}, |
|
1121 | 1120 | fullclnodes=self._fullclnodes, |
|
1122 | 1121 | precomputedellipsis=self._precomputedellipsis, |
|
1123 | 1122 | ) |
|
1124 | 1123 | |
|
1125 | 1124 | return state, gen |
|
1126 | 1125 | |
|
1127 | 1126 | def generatemanifests( |
|
1128 | 1127 | self, |
|
1129 | 1128 | commonrevs, |
|
1130 | 1129 | clrevorder, |
|
1131 | 1130 | fastpathlinkrev, |
|
1132 | 1131 | manifests, |
|
1133 | 1132 | fnodes, |
|
1134 | 1133 | source, |
|
1135 | 1134 | clrevtolocalrev, |
|
1136 | 1135 | ): |
|
1137 | 1136 | """Returns an iterator of changegroup chunks containing manifests. |
|
1138 | 1137 | |
|
1139 | 1138 | `source` is unused here, but is used by extensions like remotefilelog to |
|
1140 | 1139 | change what is sent based on pulls vs pushes, etc.
|
1141 | 1140 | """ |
|
1142 | 1141 | repo = self._repo |
|
1143 | 1142 | mfl = repo.manifestlog |
|
1144 | 1143 | tmfnodes = {b'': manifests} |
|
1145 | 1144 | |
|
1146 | 1145 | # Callback for the manifest, used to collect linkrevs for filelog |
|
1147 | 1146 | # revisions. |
|
1148 | 1147 | # Returns the linkrev node (collected in lookupcl). |
|
1149 | 1148 | def makelookupmflinknode(tree, nodes): |
|
1150 | 1149 | if fastpathlinkrev: |
|
1151 | 1150 | assert not tree |
|
1152 | 1151 | return ( |
|
1153 | 1152 | manifests.__getitem__ |
|
1154 | 1153 | ) # pytype: disable=unsupported-operands |
|
1155 | 1154 | |
|
1156 | 1155 | def lookupmflinknode(x): |
|
1157 | 1156 | """Callback for looking up the linknode for manifests. |
|
1158 | 1157 | |
|
1159 | 1158 | Returns the linkrev node for the specified manifest. |
|
1160 | 1159 | |
|
1161 | 1160 | SIDE EFFECT: |
|
1162 | 1161 | |
|
1163 | 1162 | 1) fclnodes gets populated with the list of relevant |
|
1164 | 1163 | file nodes if we're not using fastpathlinkrev |
|
1165 | 1164 | 2) When treemanifests are in use, collects treemanifest nodes |
|
1166 | 1165 | to send |
|
1167 | 1166 | |
|
1168 | 1167 | Note that this means manifests must be completely sent to |
|
1169 | 1168 | the client before you can trust the list of files and |
|
1170 | 1169 | treemanifests to send. |
|
1171 | 1170 | """ |
|
1172 | 1171 | clnode = nodes[x] |
|
1173 | 1172 | mdata = mfl.get(tree, x).readfast(shallow=True) |
|
1174 | 1173 | for p, n, fl in mdata.iterentries(): |
|
1175 | 1174 | if fl == b't': # subdirectory manifest |
|
1176 | 1175 | subtree = tree + p + b'/' |
|
1177 | 1176 | tmfclnodes = tmfnodes.setdefault(subtree, {}) |
|
1178 | 1177 | tmfclnode = tmfclnodes.setdefault(n, clnode) |
|
1179 | 1178 | if clrevorder[clnode] < clrevorder[tmfclnode]: |
|
1180 | 1179 | tmfclnodes[n] = clnode |
|
1181 | 1180 | else: |
|
1182 | 1181 | f = tree + p |
|
1183 | 1182 | fclnodes = fnodes.setdefault(f, {}) |
|
1184 | 1183 | fclnode = fclnodes.setdefault(n, clnode) |
|
1185 | 1184 | if clrevorder[clnode] < clrevorder[fclnode]: |
|
1186 | 1185 | fclnodes[n] = clnode |
|
1187 | 1186 | return clnode |
|
1188 | 1187 | |
|
1189 | 1188 | return lookupmflinknode |
|
1190 | 1189 | |
|
1191 | 1190 | while tmfnodes: |
|
1192 | 1191 | tree, nodes = tmfnodes.popitem() |
|
1193 | 1192 | |
|
1194 | 1193 | should_visit = self._matcher.visitdir(tree[:-1]) |
|
1195 | 1194 | if tree and not should_visit: |
|
1196 | 1195 | continue |
|
1197 | 1196 | |
|
1198 | 1197 | store = mfl.getstorage(tree) |
|
1199 | 1198 | |
|
1200 | 1199 | if not should_visit: |
|
1201 | 1200 | # No nodes to send because this directory is out of |
|
1202 | 1201 | # the client's view of the repository (probably |
|
1203 | 1202 | # because of narrow clones). Do this even for the root |
|
1204 | 1203 | # directory (tree=='') |
|
1205 | 1204 | prunednodes = [] |
|
1206 | 1205 | else: |
|
1207 | 1206 | # Avoid sending any manifest nodes we can prove the |
|
1208 | 1207 | # client already has by checking linkrevs. See the |
|
1209 | 1208 | # related comment in generatefiles(). |
|
1210 | 1209 | prunednodes = self._prunemanifests(store, nodes, commonrevs) |
|
1211 | 1210 | |
|
1212 | 1211 | if tree and not prunednodes: |
|
1213 | 1212 | continue |
|
1214 | 1213 | |
|
1215 | 1214 | lookupfn = makelookupmflinknode(tree, nodes) |
|
1216 | 1215 | |
|
1217 | 1216 | deltas = deltagroup( |
|
1218 | 1217 | self._repo, |
|
1219 | 1218 | store, |
|
1220 | 1219 | prunednodes, |
|
1221 | 1220 | False, |
|
1222 | 1221 | lookupfn, |
|
1223 | 1222 | self._forcedeltaparentprev, |
|
1224 | 1223 | ellipses=self._ellipses, |
|
1225 | 1224 | topic=_(b'manifests'), |
|
1226 | 1225 | clrevtolocalrev=clrevtolocalrev, |
|
1227 | 1226 | fullclnodes=self._fullclnodes, |
|
1228 | 1227 | precomputedellipsis=self._precomputedellipsis, |
|
1229 | 1228 | ) |
|
1230 | 1229 | |
|
1231 | 1230 | if not self._oldmatcher.visitdir(store.tree[:-1]): |
|
1232 | 1231 | yield tree, deltas |
|
1233 | 1232 | else: |
|
1234 | 1233 | # 'deltas' is a generator and we need to consume it even if |
|
1235 | 1234 | # we are not going to send it because a side-effect is that |
|
1236 | 1235 | # it updates tmfnodes (via lookupfn)
|
1237 | 1236 | for d in deltas: |
|
1238 | 1237 | pass |
|
1239 | 1238 | if not tree: |
|
1240 | 1239 | yield tree, [] |
|
1241 | 1240 | |
|
1242 | 1241 | def _prunemanifests(self, store, nodes, commonrevs): |
|
1243 | 1242 | if not self._ellipses: |
|
1244 | 1243 | # In the non-ellipses case, and for large repositories, it is

1245 | 1244 | # cheaper to avoid calling store.rev and store.linkrev on a lot of

1246 | 1245 | # nodes than to save sending some extra data
|
1247 | 1246 | return nodes.copy() |
|
1248 | 1247 | # This is split out as a separate method to allow filtering |
|
1249 | 1248 | # commonrevs in extension code. |
|
1250 | 1249 | # |
|
1251 | 1250 | # TODO(augie): this shouldn't be required, instead we should |
|
1252 | 1251 | # make filtering of revisions to send delegated to the store |
|
1253 | 1252 | # layer. |
|
1254 | 1253 | frev, flr = store.rev, store.linkrev |
|
1255 | 1254 | return [n for n in nodes if flr(frev(n)) not in commonrevs] |
|
1256 | 1255 | |
|
1257 | 1256 | # The 'source' parameter is useful for extensions |
|
1258 | 1257 | def generatefiles( |
|
1259 | 1258 | self, |
|
1260 | 1259 | changedfiles, |
|
1261 | 1260 | commonrevs, |
|
1262 | 1261 | source, |
|
1263 | 1262 | mfdicts, |
|
1264 | 1263 | fastpathlinkrev, |
|
1265 | 1264 | fnodes, |
|
1266 | 1265 | clrevs, |
|
1267 | 1266 | ): |
|
1268 | 1267 | changedfiles = [ |
|
1269 | 1268 | f |
|
1270 | 1269 | for f in changedfiles |
|
1271 | 1270 | if self._matcher(f) and not self._oldmatcher(f) |
|
1272 | 1271 | ] |
|
1273 | 1272 | |
|
1274 | 1273 | if not fastpathlinkrev: |
|
1275 | 1274 | |
|
1276 | 1275 | def normallinknodes(unused, fname): |
|
1277 | 1276 | return fnodes.get(fname, {}) |
|
1278 | 1277 | |
|
1279 | 1278 | else: |
|
1280 | 1279 | cln = self._repo.changelog.node |
|
1281 | 1280 | |
|
1282 | 1281 | def normallinknodes(store, fname): |
|
1283 | 1282 | flinkrev = store.linkrev |
|
1284 | 1283 | fnode = store.node |
|
1285 | 1284 | revs = ((r, flinkrev(r)) for r in store) |
|
1286 | 1285 | return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs} |
|
1287 | 1286 | |
|
1288 | 1287 | clrevtolocalrev = {} |
|
1289 | 1288 | |
|
1290 | 1289 | if self._isshallow: |
|
1291 | 1290 | # In a shallow clone, the linknodes callback needs to also include |
|
1292 | 1291 | # those file nodes that are in the manifests we sent but weren't |
|
1293 | 1292 | # introduced by those manifests. |
|
1294 | 1293 | commonctxs = [self._repo[c] for c in commonrevs] |
|
1295 | 1294 | clrev = self._repo.changelog.rev |
|
1296 | 1295 | |
|
1297 | 1296 | def linknodes(flog, fname): |
|
1298 | 1297 | for c in commonctxs: |
|
1299 | 1298 | try: |
|
1300 | 1299 | fnode = c.filenode(fname) |
|
1301 | 1300 | clrevtolocalrev[c.rev()] = flog.rev(fnode) |
|
1302 | 1301 | except error.ManifestLookupError: |
|
1303 | 1302 | pass |
|
1304 | 1303 | links = normallinknodes(flog, fname) |
|
1305 | 1304 | if len(links) != len(mfdicts): |
|
1306 | 1305 | for mf, lr in mfdicts: |
|
1307 | 1306 | fnode = mf.get(fname, None) |
|
1308 | 1307 | if fnode in links: |
|
1309 | 1308 | links[fnode] = min(links[fnode], lr, key=clrev) |
|
1310 | 1309 | elif fnode: |
|
1311 | 1310 | links[fnode] = lr |
|
1312 | 1311 | return links |
|
1313 | 1312 | |
|
1314 | 1313 | else: |
|
1315 | 1314 | linknodes = normallinknodes |
|
1316 | 1315 | |
|
1317 | 1316 | repo = self._repo |
|
1318 | 1317 | progress = repo.ui.makeprogress( |
|
1319 | 1318 | _(b'files'), unit=_(b'files'), total=len(changedfiles) |
|
1320 | 1319 | ) |
|
1321 | 1320 | for i, fname in enumerate(sorted(changedfiles)): |
|
1322 | 1321 | filerevlog = repo.file(fname) |
|
1323 | 1322 | if not filerevlog: |
|
1324 | 1323 | raise error.Abort( |
|
1325 | 1324 | _(b"empty or missing file data for %s") % fname |
|
1326 | 1325 | ) |
|
1327 | 1326 | |
|
1328 | 1327 | clrevtolocalrev.clear() |
|
1329 | 1328 | |
|
1330 | 1329 | linkrevnodes = linknodes(filerevlog, fname) |
|
1331 | 1330 | # Lookup for filenodes; we collected the linkrev nodes above in the

1332 | 1331 | # fastpath case and via lookupmflinknode in the slowpath case.
|
1333 | 1332 | def lookupfilelog(x): |
|
1334 | 1333 | return linkrevnodes[x] |
|
1335 | 1334 | |
|
1336 | 1335 | frev, flr = filerevlog.rev, filerevlog.linkrev |
|
1337 | 1336 | # Skip sending any filenode we know the client already |
|
1338 | 1337 | # has. This avoids over-sending files relatively |
|
1339 | 1338 | # inexpensively, so it's not a problem if we under-filter |
|
1340 | 1339 | # here. |
|
1341 | 1340 | filenodes = [ |
|
1342 | 1341 | n for n in linkrevnodes if flr(frev(n)) not in commonrevs |
|
1343 | 1342 | ] |
|
1344 | 1343 | |
|
1345 | 1344 | if not filenodes: |
|
1346 | 1345 | continue |
|
1347 | 1346 | |
|
1348 | 1347 | progress.update(i + 1, item=fname) |
|
1349 | 1348 | |
|
1350 | 1349 | deltas = deltagroup( |
|
1351 | 1350 | self._repo, |
|
1352 | 1351 | filerevlog, |
|
1353 | 1352 | filenodes, |
|
1354 | 1353 | False, |
|
1355 | 1354 | lookupfilelog, |
|
1356 | 1355 | self._forcedeltaparentprev, |
|
1357 | 1356 | ellipses=self._ellipses, |
|
1358 | 1357 | clrevtolocalrev=clrevtolocalrev, |
|
1359 | 1358 | fullclnodes=self._fullclnodes, |
|
1360 | 1359 | precomputedellipsis=self._precomputedellipsis, |
|
1361 | 1360 | ) |
|
1362 | 1361 | |
|
1363 | 1362 | yield fname, deltas |
|
1364 | 1363 | |
|
1365 | 1364 | progress.complete() |
|
1366 | 1365 | |
|
1367 | 1366 | |
|
1368 | 1367 | def _makecg1packer( |
|
1369 | 1368 | repo, |
|
1370 | 1369 | oldmatcher, |
|
1371 | 1370 | matcher, |
|
1372 | 1371 | bundlecaps, |
|
1373 | 1372 | ellipses=False, |
|
1374 | 1373 | shallow=False, |
|
1375 | 1374 | ellipsisroots=None, |
|
1376 | 1375 | fullnodes=None, |
|
1377 | 1376 | ): |
|
1378 | 1377 | builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack( |
|
1379 | 1378 | d.node, d.p1node, d.p2node, d.linknode |
|
1380 | 1379 | ) |
|
1381 | 1380 | |
|
1382 | 1381 | return cgpacker( |
|
1383 | 1382 | repo, |
|
1384 | 1383 | oldmatcher, |
|
1385 | 1384 | matcher, |
|
1386 | 1385 | b'01', |
|
1387 | 1386 | builddeltaheader=builddeltaheader, |
|
1388 | 1387 | manifestsend=b'', |
|
1389 | 1388 | forcedeltaparentprev=True, |
|
1390 | 1389 | bundlecaps=bundlecaps, |
|
1391 | 1390 | ellipses=ellipses, |
|
1392 | 1391 | shallow=shallow, |
|
1393 | 1392 | ellipsisroots=ellipsisroots, |
|
1394 | 1393 | fullnodes=fullnodes, |
|
1395 | 1394 | ) |
|
1396 | 1395 | |
|
1397 | 1396 | |
|
1398 | 1397 | def _makecg2packer( |
|
1399 | 1398 | repo, |
|
1400 | 1399 | oldmatcher, |
|
1401 | 1400 | matcher, |
|
1402 | 1401 | bundlecaps, |
|
1403 | 1402 | ellipses=False, |
|
1404 | 1403 | shallow=False, |
|
1405 | 1404 | ellipsisroots=None, |
|
1406 | 1405 | fullnodes=None, |
|
1407 | 1406 | ): |
|
1408 | 1407 | builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack( |
|
1409 | 1408 | d.node, d.p1node, d.p2node, d.basenode, d.linknode |
|
1410 | 1409 | ) |
|
1411 | 1410 | |
|
1412 | 1411 | return cgpacker( |
|
1413 | 1412 | repo, |
|
1414 | 1413 | oldmatcher, |
|
1415 | 1414 | matcher, |
|
1416 | 1415 | b'02', |
|
1417 | 1416 | builddeltaheader=builddeltaheader, |
|
1418 | 1417 | manifestsend=b'', |
|
1419 | 1418 | bundlecaps=bundlecaps, |
|
1420 | 1419 | ellipses=ellipses, |
|
1421 | 1420 | shallow=shallow, |
|
1422 | 1421 | ellipsisroots=ellipsisroots, |
|
1423 | 1422 | fullnodes=fullnodes, |
|
1424 | 1423 | ) |
|
1425 | 1424 | |
|
1426 | 1425 | |
|
1427 | 1426 | def _makecg3packer( |
|
1428 | 1427 | repo, |
|
1429 | 1428 | oldmatcher, |
|
1430 | 1429 | matcher, |
|
1431 | 1430 | bundlecaps, |
|
1432 | 1431 | ellipses=False, |
|
1433 | 1432 | shallow=False, |
|
1434 | 1433 | ellipsisroots=None, |
|
1435 | 1434 | fullnodes=None, |
|
1436 | 1435 | ): |
|
1437 | 1436 | builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack( |
|
1438 | 1437 | d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags |
|
1439 | 1438 | ) |
|
1440 | 1439 | |
|
1441 | 1440 | return cgpacker( |
|
1442 | 1441 | repo, |
|
1443 | 1442 | oldmatcher, |
|
1444 | 1443 | matcher, |
|
1445 | 1444 | b'03', |
|
1446 | 1445 | builddeltaheader=builddeltaheader, |
|
1447 | 1446 | manifestsend=closechunk(), |
|
1448 | 1447 | bundlecaps=bundlecaps, |
|
1449 | 1448 | ellipses=ellipses, |
|
1450 | 1449 | shallow=shallow, |
|
1451 | 1450 | ellipsisroots=ellipsisroots, |
|
1452 | 1451 | fullnodes=fullnodes, |
|
1453 | 1452 | ) |
|
1454 | 1453 | |
|
1455 | 1454 | |
|
1456 | 1455 | _packermap = { |
|
1457 | 1456 | b'01': (_makecg1packer, cg1unpacker), |
|
1458 | 1457 | # cg2 adds support for exchanging generaldelta |
|
1459 | 1458 | b'02': (_makecg2packer, cg2unpacker), |
|
1460 | 1459 | # cg3 adds support for exchanging revlog flags and treemanifests |
|
1461 | 1460 | b'03': (_makecg3packer, cg3unpacker), |
|
1462 | 1461 | } |
|
1463 | 1462 | |
|
1464 | 1463 | |
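A quick sketch of the dispatch this table enables, mirroring getbundler()/getunbundler() below:

    # e.g. resolving the generaldelta-capable cg2 pair:
    makepacker, unpackercls = _packermap[b'02']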
|
1465 | 1464 | def allsupportedversions(repo): |
|
1466 | 1465 | versions = set(_packermap.keys()) |
|
1467 | 1466 | needv03 = False |
|
1468 | 1467 | if ( |
|
1469 | 1468 | repo.ui.configbool(b'experimental', b'changegroup3') |
|
1470 | 1469 | or repo.ui.configbool(b'experimental', b'treemanifest') |
|
1471 | or requirements.TREEMANIFEST_REQUIREMENT in repo.requirements | |
|
1470 | or scmutil.istreemanifest(repo) | |
|
1472 | 1471 | ): |
|
1473 | 1472 | # we keep version 03 because we need it to exchange treemanifest data

1474 | 1473 | #

1475 | 1474 | # we also keep versions 01 and 02, because it is possible for a repo to

1476 | 1475 | # contain both normal and tree manifests at the same time, so using

1477 | 1476 | # an older version to pull data is viable

1478 | 1477 | #

1479 | 1478 | # (or even to push a subset of history)
|
1480 | 1479 | needv03 = True |
|
1481 | 1480 | if b'exp-sidedata-flag' in repo.requirements: |
|
1482 | 1481 | needv03 = True |
|
1483 | 1482 | # don't attempt to use 01/02 until we do sidedata cleaning |
|
1484 | 1483 | versions.discard(b'01') |
|
1485 | 1484 | versions.discard(b'02') |
|
1486 | 1485 | if not needv03: |
|
1487 | 1486 | versions.discard(b'03') |
|
1488 | 1487 | return versions |
|
1489 | 1488 | |
|
1490 | 1489 | |
|
1491 | 1490 | # Changegroup versions that can be applied to the repo |
|
1492 | 1491 | def supportedincomingversions(repo): |
|
1493 | 1492 | return allsupportedversions(repo) |
|
1494 | 1493 | |
|
1495 | 1494 | |
|
1496 | 1495 | # Changegroup versions that can be created from the repo |
|
1497 | 1496 | def supportedoutgoingversions(repo): |
|
1498 | 1497 | versions = allsupportedversions(repo) |
|
1499 | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: | |
|
1498 | if scmutil.istreemanifest(repo): | |
|
1500 | 1499 | # Versions 01 and 02 support only flat manifests and it's just too |
|
1501 | 1500 | # expensive to convert between the flat manifest and tree manifest on |
|
1502 | 1501 | # the fly. Since tree manifests are hashed differently, all of history |
|
1503 | 1502 | # would have to be converted. Instead, we simply don't even pretend to |
|
1504 | 1503 | # support versions 01 and 02. |
|
1505 | 1504 | versions.discard(b'01') |
|
1506 | 1505 | versions.discard(b'02') |
|
1507 | 1506 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
1508 | 1507 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
1509 | 1508 | # support that for stripping and unbundling to work. |
|
1510 | 1509 | versions.discard(b'01') |
|
1511 | 1510 | versions.discard(b'02') |
|
1512 | 1511 | if LFS_REQUIREMENT in repo.requirements: |
|
1513 | 1512 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
1514 | 1513 | # mark LFS entries with REVIDX_EXTSTORED. |
|
1515 | 1514 | versions.discard(b'01') |
|
1516 | 1515 | versions.discard(b'02') |
|
1517 | 1516 | |
|
1518 | 1517 | return versions |
|
1519 | 1518 | |
|
1520 | 1519 | |
|
1521 | 1520 | def localversion(repo): |
|
1522 | 1521 | # Finds the best version to use for bundles that are meant to be used |
|
1523 | 1522 | # locally, such as those from strip and shelve, and temporary bundles. |
|
1524 | 1523 | return max(supportedoutgoingversions(repo)) |
|
1525 | 1524 | |
|
1526 | 1525 | |
|
1527 | 1526 | def safeversion(repo): |
|
1528 | 1527 | # Finds the smallest version that it's safe to assume clients of the repo |
|
1529 | 1528 | # will support. For example, all hg versions that support generaldelta also |
|
1530 | 1529 | # support changegroup 02. |
|
1531 | 1530 | versions = supportedoutgoingversions(repo) |
|
1532 | 1531 | if b'generaldelta' in repo.requirements: |
|
1533 | 1532 | versions.discard(b'01') |
|
1534 | 1533 | assert versions |
|
1535 | 1534 | return min(versions) |
|
1536 | 1535 | |
|
1537 | 1536 | |
|
1538 | 1537 | def getbundler( |
|
1539 | 1538 | version, |
|
1540 | 1539 | repo, |
|
1541 | 1540 | bundlecaps=None, |
|
1542 | 1541 | oldmatcher=None, |
|
1543 | 1542 | matcher=None, |
|
1544 | 1543 | ellipses=False, |
|
1545 | 1544 | shallow=False, |
|
1546 | 1545 | ellipsisroots=None, |
|
1547 | 1546 | fullnodes=None, |
|
1548 | 1547 | ): |
|
1549 | 1548 | assert version in supportedoutgoingversions(repo) |
|
1550 | 1549 | |
|
1551 | 1550 | if matcher is None: |
|
1552 | 1551 | matcher = matchmod.always() |
|
1553 | 1552 | if oldmatcher is None: |
|
1554 | 1553 | oldmatcher = matchmod.never() |
|
1555 | 1554 | |
|
1556 | 1555 | if version == b'01' and not matcher.always(): |
|
1557 | 1556 | raise error.ProgrammingError( |
|
1558 | 1557 | b'version 01 changegroups do not support sparse file matchers' |
|
1559 | 1558 | ) |
|
1560 | 1559 | |
|
1561 | 1560 | if ellipses and version in (b'01', b'02'): |
|
1562 | 1561 | raise error.Abort( |
|
1563 | 1562 | _( |
|
1564 | 1563 | b'ellipsis nodes require at least cg3 on client and server, ' |
|
1565 | 1564 | b'but negotiated version %s' |
|
1566 | 1565 | ) |
|
1567 | 1566 | % version |
|
1568 | 1567 | ) |
|
1569 | 1568 | |
|
1570 | 1569 | # Requested files could include files not in the local store. So |
|
1571 | 1570 | # filter those out. |
|
1572 | 1571 | matcher = repo.narrowmatch(matcher) |
|
1573 | 1572 | |
|
1574 | 1573 | fn = _packermap[version][0] |
|
1575 | 1574 | return fn( |
|
1576 | 1575 | repo, |
|
1577 | 1576 | oldmatcher, |
|
1578 | 1577 | matcher, |
|
1579 | 1578 | bundlecaps, |
|
1580 | 1579 | ellipses=ellipses, |
|
1581 | 1580 | shallow=shallow, |
|
1582 | 1581 | ellipsisroots=ellipsisroots, |
|
1583 | 1582 | fullnodes=fullnodes, |
|
1584 | 1583 | ) |
|
1585 | 1584 | |
|
1586 | 1585 | |
|
1587 | 1586 | def getunbundler(version, fh, alg, extras=None): |
|
1588 | 1587 | return _packermap[version][1](fh, alg, extras=extras) |
|
1589 | 1588 | |
|
1590 | 1589 | |
|
1591 | 1590 | def _changegroupinfo(repo, nodes, source): |
|
1592 | 1591 | if repo.ui.verbose or source == b'bundle': |
|
1593 | 1592 | repo.ui.status(_(b"%d changesets found\n") % len(nodes)) |
|
1594 | 1593 | if repo.ui.debugflag: |
|
1595 | 1594 | repo.ui.debug(b"list of changesets:\n") |
|
1596 | 1595 | for node in nodes: |
|
1597 | 1596 | repo.ui.debug(b"%s\n" % hex(node)) |
|
1598 | 1597 | |
|
1599 | 1598 | |
|
1600 | 1599 | def makechangegroup( |
|
1601 | 1600 | repo, outgoing, version, source, fastpath=False, bundlecaps=None |
|
1602 | 1601 | ): |
|
1603 | 1602 | cgstream = makestream( |
|
1604 | 1603 | repo, |
|
1605 | 1604 | outgoing, |
|
1606 | 1605 | version, |
|
1607 | 1606 | source, |
|
1608 | 1607 | fastpath=fastpath, |
|
1609 | 1608 | bundlecaps=bundlecaps, |
|
1610 | 1609 | ) |
|
1611 | 1610 | return getunbundler( |
|
1612 | 1611 | version, |
|
1613 | 1612 | util.chunkbuffer(cgstream), |
|
1614 | 1613 | None, |
|
1615 | 1614 | {b'clcount': len(outgoing.missing)}, |
|
1616 | 1615 | ) |
|
1617 | 1616 | |
|
1618 | 1617 | |
|
1619 | 1618 | def makestream( |
|
1620 | 1619 | repo, |
|
1621 | 1620 | outgoing, |
|
1622 | 1621 | version, |
|
1623 | 1622 | source, |
|
1624 | 1623 | fastpath=False, |
|
1625 | 1624 | bundlecaps=None, |
|
1626 | 1625 | matcher=None, |
|
1627 | 1626 | ): |
|
1628 | 1627 | bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher) |
|
1629 | 1628 | |
|
1630 | 1629 | repo = repo.unfiltered() |
|
1631 | 1630 | commonrevs = outgoing.common |
|
1632 | 1631 | csets = outgoing.missing |
|
1633 | 1632 | heads = outgoing.ancestorsof |
|
1634 | 1633 | # We go through the fast path if we get told to, or if all (unfiltered)

1635 | 1634 | # heads have been requested (since we then know all linkrevs will

1636 | 1635 | # be pulled by the client).
|
1637 | 1636 | heads.sort() |
|
1638 | 1637 | fastpathlinkrev = fastpath or ( |
|
1639 | 1638 | repo.filtername is None and heads == sorted(repo.heads()) |
|
1640 | 1639 | ) |
|
1641 | 1640 | |
|
1642 | 1641 | repo.hook(b'preoutgoing', throw=True, source=source) |
|
1643 | 1642 | _changegroupinfo(repo, csets, source) |
|
1644 | 1643 | return bundler.generate(commonrevs, csets, fastpathlinkrev, source) |
|
1645 | 1644 | |
|
1646 | 1645 | |
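A hypothetical end-to-end driver tying makechangegroup() to discovery — `repo` and `other` stand for a local repository and a remote peer, and findcommonoutgoing is assumed to come from mercurial.discovery:

    from mercurial import discovery

    def outgoingbundle(repo, other):
        # Compute what the peer is missing, then build a changegroup
        # that can be applied there (b'02' assumes generaldelta support).
        outgoing = discovery.findcommonoutgoing(repo, other)
        return makechangegroup(repo, outgoing, b'02', b'push')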
|
1647 | 1646 | def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles): |
|
1648 | 1647 | revisions = 0 |
|
1649 | 1648 | files = 0 |
|
1650 | 1649 | progress = repo.ui.makeprogress( |
|
1651 | 1650 | _(b'files'), unit=_(b'files'), total=expectedfiles |
|
1652 | 1651 | ) |
|
1653 | 1652 | for chunkdata in iter(source.filelogheader, {}): |
|
1654 | 1653 | files += 1 |
|
1655 | 1654 | f = chunkdata[b"filename"] |
|
1656 | 1655 | repo.ui.debug(b"adding %s revisions\n" % f) |
|
1657 | 1656 | progress.increment() |
|
1658 | 1657 | fl = repo.file(f) |
|
1659 | 1658 | o = len(fl) |
|
1660 | 1659 | try: |
|
1661 | 1660 | deltas = source.deltaiter() |
|
1662 | 1661 | if not fl.addgroup(deltas, revmap, trp): |
|
1663 | 1662 | raise error.Abort(_(b"received file revlog group is empty")) |
|
1664 | 1663 | except error.CensoredBaseError as e: |
|
1665 | 1664 | raise error.Abort(_(b"received delta base is censored: %s") % e) |
|
1666 | 1665 | revisions += len(fl) - o |
|
1667 | 1666 | if f in needfiles: |
|
1668 | 1667 | needs = needfiles[f] |
|
1669 | 1668 | for new in pycompat.xrange(o, len(fl)): |
|
1670 | 1669 | n = fl.node(new) |
|
1671 | 1670 | if n in needs: |
|
1672 | 1671 | needs.remove(n) |
|
1673 | 1672 | else: |
|
1674 | 1673 | raise error.Abort(_(b"received spurious file revlog entry")) |
|
1675 | 1674 | if not needs: |
|
1676 | 1675 | del needfiles[f] |
|
1677 | 1676 | progress.complete() |
|
1678 | 1677 | |
|
1679 | 1678 | for f, needs in pycompat.iteritems(needfiles): |
|
1680 | 1679 | fl = repo.file(f) |
|
1681 | 1680 | for n in needs: |
|
1682 | 1681 | try: |
|
1683 | 1682 | fl.rev(n) |
|
1684 | 1683 | except error.LookupError: |
|
1685 | 1684 | raise error.Abort( |
|
1686 | 1685 | _(b'missing file data for %s:%s - run hg verify') |
|
1687 | 1686 | % (f, hex(n)) |
|
1688 | 1687 | ) |
|
1689 | 1688 | |
|
1690 | 1689 | return revisions, files |
@@ -1,4218 +1,4217 | |||
|
1 | 1 | # cmdutil.py - help for command processing in mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import copy as copymod |
|
11 | 11 | import errno |
|
12 | 12 | import os |
|
13 | 13 | import re |
|
14 | 14 | |
|
15 | 15 | from .i18n import _ |
|
16 | 16 | from .node import ( |
|
17 | 17 | hex, |
|
18 | 18 | nullid, |
|
19 | 19 | nullrev, |
|
20 | 20 | short, |
|
21 | 21 | ) |
|
22 | 22 | from .pycompat import ( |
|
23 | 23 | getattr, |
|
24 | 24 | open, |
|
25 | 25 | setattr, |
|
26 | 26 | ) |
|
27 | 27 | from .thirdparty import attr |
|
28 | 28 | |
|
29 | 29 | from . import ( |
|
30 | 30 | bookmarks, |
|
31 | 31 | changelog, |
|
32 | 32 | copies, |
|
33 | 33 | crecord as crecordmod, |
|
34 | 34 | dirstateguard, |
|
35 | 35 | encoding, |
|
36 | 36 | error, |
|
37 | 37 | formatter, |
|
38 | 38 | logcmdutil, |
|
39 | 39 | match as matchmod, |
|
40 | 40 | merge as mergemod, |
|
41 | 41 | mergestate as mergestatemod, |
|
42 | 42 | mergeutil, |
|
43 | 43 | obsolete, |
|
44 | 44 | patch, |
|
45 | 45 | pathutil, |
|
46 | 46 | phases, |
|
47 | 47 | pycompat, |
|
48 | 48 | repair, |
|
49 | requirements, | |
|
50 | 49 | revlog, |
|
51 | 50 | rewriteutil, |
|
52 | 51 | scmutil, |
|
53 | 52 | smartset, |
|
54 | 53 | state as statemod, |
|
55 | 54 | subrepoutil, |
|
56 | 55 | templatekw, |
|
57 | 56 | templater, |
|
58 | 57 | util, |
|
59 | 58 | vfs as vfsmod, |
|
60 | 59 | ) |
|
61 | 60 | |
|
62 | 61 | from .utils import ( |
|
63 | 62 | dateutil, |
|
64 | 63 | stringutil, |
|
65 | 64 | ) |
|
66 | 65 | |
|
67 | 66 | if pycompat.TYPE_CHECKING: |
|
68 | 67 | from typing import ( |
|
69 | 68 | Any, |
|
70 | 69 | Dict, |
|
71 | 70 | ) |
|
72 | 71 | |
|
73 | 72 | for t in (Any, Dict): |
|
74 | 73 | assert t |
|
75 | 74 | |
|
76 | 75 | stringio = util.stringio |
|
77 | 76 | |
|
78 | 77 | # templates of common command options |
|
79 | 78 | |
|
80 | 79 | dryrunopts = [ |
|
81 | 80 | (b'n', b'dry-run', None, _(b'do not perform actions, just print output')), |
|
82 | 81 | ] |
|
83 | 82 | |
|
84 | 83 | confirmopts = [ |
|
85 | 84 | (b'', b'confirm', None, _(b'ask before applying actions')), |
|
86 | 85 | ] |
|
87 | 86 | |
|
88 | 87 | remoteopts = [ |
|
89 | 88 | (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')), |
|
90 | 89 | ( |
|
91 | 90 | b'', |
|
92 | 91 | b'remotecmd', |
|
93 | 92 | b'', |
|
94 | 93 | _(b'specify hg command to run on the remote side'), |
|
95 | 94 | _(b'CMD'), |
|
96 | 95 | ), |
|
97 | 96 | ( |
|
98 | 97 | b'', |
|
99 | 98 | b'insecure', |
|
100 | 99 | None, |
|
101 | 100 | _(b'do not verify server certificate (ignoring web.cacerts config)'), |
|
102 | 101 | ), |
|
103 | 102 | ] |
|
104 | 103 | |
|
105 | 104 | walkopts = [ |
|
106 | 105 | ( |
|
107 | 106 | b'I', |
|
108 | 107 | b'include', |
|
109 | 108 | [], |
|
110 | 109 | _(b'include names matching the given patterns'), |
|
111 | 110 | _(b'PATTERN'), |
|
112 | 111 | ), |
|
113 | 112 | ( |
|
114 | 113 | b'X', |
|
115 | 114 | b'exclude', |
|
116 | 115 | [], |
|
117 | 116 | _(b'exclude names matching the given patterns'), |
|
118 | 117 | _(b'PATTERN'), |
|
119 | 118 | ), |
|
120 | 119 | ] |
|
121 | 120 | |
|
122 | 121 | commitopts = [ |
|
123 | 122 | (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')), |
|
124 | 123 | (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')), |
|
125 | 124 | ] |
|
126 | 125 | |
|
127 | 126 | commitopts2 = [ |
|
128 | 127 | ( |
|
129 | 128 | b'd', |
|
130 | 129 | b'date', |
|
131 | 130 | b'', |
|
132 | 131 | _(b'record the specified date as commit date'), |
|
133 | 132 | _(b'DATE'), |
|
134 | 133 | ), |
|
135 | 134 | ( |
|
136 | 135 | b'u', |
|
137 | 136 | b'user', |
|
138 | 137 | b'', |
|
139 | 138 | _(b'record the specified user as committer'), |
|
140 | 139 | _(b'USER'), |
|
141 | 140 | ), |
|
142 | 141 | ] |
|
143 | 142 | |
|
144 | 143 | commitopts3 = [ |
|
145 | 144 | (b'D', b'currentdate', None, _(b'record the current date as commit date')), |
|
146 | 145 | (b'U', b'currentuser', None, _(b'record the current user as committer')), |
|
147 | 146 | ] |
|
148 | 147 | |
|
149 | 148 | formatteropts = [ |
|
150 | 149 | (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')), |
|
151 | 150 | ] |
|
152 | 151 | |
|
153 | 152 | templateopts = [ |
|
154 | 153 | ( |
|
155 | 154 | b'', |
|
156 | 155 | b'style', |
|
157 | 156 | b'', |
|
158 | 157 | _(b'display using template map file (DEPRECATED)'), |
|
159 | 158 | _(b'STYLE'), |
|
160 | 159 | ), |
|
161 | 160 | (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')), |
|
162 | 161 | ] |
|
163 | 162 | |
|
164 | 163 | logopts = [ |
|
165 | 164 | (b'p', b'patch', None, _(b'show patch')), |
|
166 | 165 | (b'g', b'git', None, _(b'use git extended diff format')), |
|
167 | 166 | (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')), |
|
168 | 167 | (b'M', b'no-merges', None, _(b'do not show merges')), |
|
169 | 168 | (b'', b'stat', None, _(b'output diffstat-style summary of changes')), |
|
170 | 169 | (b'G', b'graph', None, _(b"show the revision DAG")), |
|
171 | 170 | ] + templateopts |
|
172 | 171 | |
|
173 | 172 | diffopts = [ |
|
174 | 173 | (b'a', b'text', None, _(b'treat all files as text')), |
|
175 | 174 | ( |
|
176 | 175 | b'g', |
|
177 | 176 | b'git', |
|
178 | 177 | None, |
|
179 | 178 | _(b'use git extended diff format (DEFAULT: diff.git)'), |
|
180 | 179 | ), |
|
181 | 180 | (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')), |
|
182 | 181 | (b'', b'nodates', None, _(b'omit dates from diff headers')), |
|
183 | 182 | ] |
|
184 | 183 | |
|
185 | 184 | diffwsopts = [ |
|
186 | 185 | ( |
|
187 | 186 | b'w', |
|
188 | 187 | b'ignore-all-space', |
|
189 | 188 | None, |
|
190 | 189 | _(b'ignore white space when comparing lines'), |
|
191 | 190 | ), |
|
192 | 191 | ( |
|
193 | 192 | b'b', |
|
194 | 193 | b'ignore-space-change', |
|
195 | 194 | None, |
|
196 | 195 | _(b'ignore changes in the amount of white space'), |
|
197 | 196 | ), |
|
198 | 197 | ( |
|
199 | 198 | b'B', |
|
200 | 199 | b'ignore-blank-lines', |
|
201 | 200 | None, |
|
202 | 201 | _(b'ignore changes whose lines are all blank'), |
|
203 | 202 | ), |
|
204 | 203 | ( |
|
205 | 204 | b'Z', |
|
206 | 205 | b'ignore-space-at-eol', |
|
207 | 206 | None, |
|
208 | 207 | _(b'ignore changes in whitespace at EOL'), |
|
209 | 208 | ), |
|
210 | 209 | ] |
|
211 | 210 | |
|
212 | 211 | diffopts2 = ( |
|
213 | 212 | [ |
|
214 | 213 | (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')), |
|
215 | 214 | ( |
|
216 | 215 | b'p', |
|
217 | 216 | b'show-function', |
|
218 | 217 | None, |
|
219 | 218 | _( |
|
220 | 219 | b'show which function each change is in (DEFAULT: diff.showfunc)' |
|
221 | 220 | ), |
|
222 | 221 | ), |
|
223 | 222 | (b'', b'reverse', None, _(b'produce a diff that undoes the changes')), |
|
224 | 223 | ] |
|
225 | 224 | + diffwsopts |
|
226 | 225 | + [ |
|
227 | 226 | ( |
|
228 | 227 | b'U', |
|
229 | 228 | b'unified', |
|
230 | 229 | b'', |
|
231 | 230 | _(b'number of lines of context to show'), |
|
232 | 231 | _(b'NUM'), |
|
233 | 232 | ), |
|
234 | 233 | (b'', b'stat', None, _(b'output diffstat-style summary of changes')), |
|
235 | 234 | ( |
|
236 | 235 | b'', |
|
237 | 236 | b'root', |
|
238 | 237 | b'', |
|
239 | 238 | _(b'produce diffs relative to subdirectory'), |
|
240 | 239 | _(b'DIR'), |
|
241 | 240 | ), |
|
242 | 241 | ] |
|
243 | 242 | ) |
|
244 | 243 | |
|
245 | 244 | mergetoolopts = [ |
|
246 | 245 | (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')), |
|
247 | 246 | ] |
|
248 | 247 | |
|
249 | 248 | similarityopts = [ |
|
250 | 249 | ( |
|
251 | 250 | b's', |
|
252 | 251 | b'similarity', |
|
253 | 252 | b'', |
|
254 | 253 | _(b'guess renamed files by similarity (0<=s<=100)'), |
|
255 | 254 | _(b'SIMILARITY'), |
|
256 | 255 | ) |
|
257 | 256 | ] |
|
258 | 257 | |
|
259 | 258 | subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))] |
|
260 | 259 | |
|
261 | 260 | debugrevlogopts = [ |
|
262 | 261 | (b'c', b'changelog', False, _(b'open changelog')), |
|
263 | 262 | (b'm', b'manifest', False, _(b'open manifest')), |
|
264 | 263 | (b'', b'dir', b'', _(b'open directory manifest')), |
|
265 | 264 | ] |
|
266 | 265 | |
|
267 | 266 | # special string such that everything below this line will be ignored in the
|
268 | 267 | # editor text |
|
269 | 268 | _linebelow = b"^HG: ------------------------ >8 ------------------------$" |
|
270 | 269 | |
|
271 | 270 | |
|
272 | 271 | def check_at_most_one_arg(opts, *args): |
|
273 | 272 | """abort if more than one of the arguments are in opts |
|
274 | 273 | |
|
275 | 274 | Returns the unique argument or None if none of them were specified. |
|
276 | 275 | """ |
|
277 | 276 | |
|
278 | 277 | def to_display(name): |
|
279 | 278 | return pycompat.sysbytes(name).replace(b'_', b'-') |
|
280 | 279 | |
|
281 | 280 | previous = None |
|
282 | 281 | for x in args: |
|
283 | 282 | if opts.get(x): |
|
284 | 283 | if previous: |
|
285 | 284 | raise error.Abort( |
|
286 | 285 | _(b'cannot specify both --%s and --%s') |
|
287 | 286 | % (to_display(previous), to_display(x)) |
|
288 | 287 | ) |
|
289 | 288 | previous = x |
|
290 | 289 | return previous |
|
291 | 290 | |
|
292 | 291 | |
|
293 | 292 | def check_incompatible_arguments(opts, first, others): |
|
294 | 293 | """abort if the first argument is given along with any of the others |
|
295 | 294 | |
|
296 | 295 | Unlike check_at_most_one_arg(), `others` are not mutually exclusive |
|
297 | 296 | among themselves, and they're passed as a single collection. |
|
298 | 297 | """ |
|
299 | 298 | for other in others: |
|
300 | 299 | check_at_most_one_arg(opts, first, other) |
|
301 | 300 | |
|
302 | 301 | |
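A minimal standalone sketch of the mutual-exclusion rule the two helpers above implement (plain Python, names invented; the real functions raise error.Abort and format flag names via to_display()):

def at_most_one(opts, *names):
    # mirror of check_at_most_one_arg(): every flag excludes every other
    previous = None
    for name in names:
        if opts.get(name):
            if previous:
                raise ValueError(
                    'cannot specify both --%s and --%s' % (previous, name))
            previous = name
    return previous

assert at_most_one({'date': '0 0'}, 'date', 'currentdate') == 'date'
assert at_most_one({}, 'date', 'currentdate') is None
# check_incompatible_arguments() is the asymmetric variant: only `first`
# excludes the others, so members of `others` may still be combined.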
|
303 | 302 | def resolvecommitoptions(ui, opts): |
|
304 | 303 | """modify commit options dict to handle related options |
|
305 | 304 | |
|
306 | 305 |     The return value indicates whether ``rewrite.update-timestamp`` is the reason
|
307 | 306 | the ``date`` option is set. |
|
308 | 307 | """ |
|
309 | 308 | check_at_most_one_arg(opts, b'date', b'currentdate') |
|
310 | 309 | check_at_most_one_arg(opts, b'user', b'currentuser') |
|
311 | 310 | |
|
312 | 311 | datemaydiffer = False # date-only change should be ignored? |
|
313 | 312 | |
|
314 | 313 | if opts.get(b'currentdate'): |
|
315 | 314 | opts[b'date'] = b'%d %d' % dateutil.makedate() |
|
316 | 315 | elif ( |
|
317 | 316 | not opts.get(b'date') |
|
318 | 317 | and ui.configbool(b'rewrite', b'update-timestamp') |
|
319 | 318 | and opts.get(b'currentdate') is None |
|
320 | 319 | ): |
|
321 | 320 | opts[b'date'] = b'%d %d' % dateutil.makedate() |
|
322 | 321 | datemaydiffer = True |
|
323 | 322 | |
|
324 | 323 | if opts.get(b'currentuser'): |
|
325 | 324 | opts[b'user'] = ui.username() |
|
326 | 325 | |
|
327 | 326 | return datemaydiffer |
|
328 | 327 | |
|
329 | 328 | |
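A standalone sketch of the date-resolution order in resolvecommitoptions() above, with hypothetical stand-ins for ui.configbool and dateutil.makedate; only the config-driven branch reports that a date-only difference may be ignored:

def resolve_date(opts, update_timestamp_configured, now=b'1000 0'):
    datemaydiffer = False
    if opts.get(b'currentdate'):
        opts[b'date'] = now      # explicit --currentdate wins
    elif not opts.get(b'date') and update_timestamp_configured:
        opts[b'date'] = now      # rewrite.update-timestamp supplied the date
        datemaydiffer = True     # so a date-only change is ignorable
    return datemaydiffer

assert resolve_date({b'currentdate': True}, True) is False
assert resolve_date({}, True) is True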
|
330 | 329 | def checknotesize(ui, opts): |
|
331 | 330 | """ make sure note is of valid format """ |
|
332 | 331 | |
|
333 | 332 | note = opts.get(b'note') |
|
334 | 333 | if not note: |
|
335 | 334 | return |
|
336 | 335 | |
|
337 | 336 | if len(note) > 255: |
|
338 | 337 | raise error.Abort(_(b"cannot store a note of more than 255 bytes")) |
|
339 | 338 | if b'\n' in note: |
|
340 | 339 | raise error.Abort(_(b"note cannot contain a newline")) |
|
341 | 340 | |
|
342 | 341 | |
|
343 | 342 | def ishunk(x): |
|
344 | 343 | hunkclasses = (crecordmod.uihunk, patch.recordhunk) |
|
345 | 344 | return isinstance(x, hunkclasses) |
|
346 | 345 | |
|
347 | 346 | |
|
348 | 347 | def newandmodified(chunks, originalchunks): |
|
349 | 348 | newlyaddedandmodifiedfiles = set() |
|
350 | 349 | alsorestore = set() |
|
351 | 350 | for chunk in chunks: |
|
352 | 351 | if ( |
|
353 | 352 | ishunk(chunk) |
|
354 | 353 | and chunk.header.isnewfile() |
|
355 | 354 | and chunk not in originalchunks |
|
356 | 355 | ): |
|
357 | 356 | newlyaddedandmodifiedfiles.add(chunk.header.filename()) |
|
358 | 357 | alsorestore.update( |
|
359 | 358 | set(chunk.header.files()) - {chunk.header.filename()} |
|
360 | 359 | ) |
|
361 | 360 | return newlyaddedandmodifiedfiles, alsorestore |
|
362 | 361 | |
|
363 | 362 | |
|
364 | 363 | def parsealiases(cmd): |
|
365 | 364 | return cmd.split(b"|") |
|
366 | 365 | |
|
367 | 366 | |
|
368 | 367 | def setupwrapcolorwrite(ui): |
|
369 | 368 | # wrap ui.write so diff output can be labeled/colorized |
|
370 | 369 | def wrapwrite(orig, *args, **kw): |
|
371 | 370 | label = kw.pop('label', b'') |
|
372 | 371 | for chunk, l in patch.difflabel(lambda: args): |
|
373 | 372 | orig(chunk, label=label + l) |
|
374 | 373 | |
|
375 | 374 | oldwrite = ui.write |
|
376 | 375 | |
|
377 | 376 | def wrap(*args, **kwargs): |
|
378 | 377 | return wrapwrite(oldwrite, *args, **kwargs) |
|
379 | 378 | |
|
380 | 379 | setattr(ui, 'write', wrap) |
|
381 | 380 | return oldwrite |
|
382 | 381 | |
|
383 | 382 | |
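The wrap/restore contract of setupwrapcolorwrite() is easy to miss: it returns the original ui.write and expects the caller to put it back, as recordfilter() below does. A hedged sketch of the intended pattern, assuming a ui object:

oldwrite = setupwrapcolorwrite(ui)   # ui.write now labels diff output
try:
    pass  # anything written via ui.write goes through patch.difflabel
finally:
    ui.write = oldwrite              # the caller must restore the original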
|
384 | 383 | def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None): |
|
385 | 384 | try: |
|
386 | 385 | if usecurses: |
|
387 | 386 | if testfile: |
|
388 | 387 | recordfn = crecordmod.testdecorator( |
|
389 | 388 | testfile, crecordmod.testchunkselector |
|
390 | 389 | ) |
|
391 | 390 | else: |
|
392 | 391 | recordfn = crecordmod.chunkselector |
|
393 | 392 | |
|
394 | 393 | return crecordmod.filterpatch( |
|
395 | 394 | ui, originalhunks, recordfn, operation |
|
396 | 395 | ) |
|
397 | 396 | except crecordmod.fallbackerror as e: |
|
398 | 397 | ui.warn(b'%s\n' % e) |
|
399 | 398 | ui.warn(_(b'falling back to text mode\n')) |
|
400 | 399 | |
|
401 | 400 | return patch.filterpatch(ui, originalhunks, match, operation) |
|
402 | 401 | |
|
403 | 402 | |
|
404 | 403 | def recordfilter(ui, originalhunks, match, operation=None): |
|
405 | 404 | """ Prompts the user to filter the originalhunks and return a list of |
|
406 | 405 | selected hunks. |
|
407 | 406 |     *operation* is used to build ui messages to indicate to the user what
|
408 | 407 | kind of filtering they are doing: reverting, committing, shelving, etc. |
|
409 | 408 | (see patch.filterpatch). |
|
410 | 409 | """ |
|
411 | 410 | usecurses = crecordmod.checkcurses(ui) |
|
412 | 411 | testfile = ui.config(b'experimental', b'crecordtest') |
|
413 | 412 | oldwrite = setupwrapcolorwrite(ui) |
|
414 | 413 | try: |
|
415 | 414 | newchunks, newopts = filterchunks( |
|
416 | 415 | ui, originalhunks, usecurses, testfile, match, operation |
|
417 | 416 | ) |
|
418 | 417 | finally: |
|
419 | 418 | ui.write = oldwrite |
|
420 | 419 | return newchunks, newopts |
|
421 | 420 | |
|
422 | 421 | |
|
423 | 422 | def dorecord( |
|
424 | 423 | ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts |
|
425 | 424 | ): |
|
426 | 425 | opts = pycompat.byteskwargs(opts) |
|
427 | 426 | if not ui.interactive(): |
|
428 | 427 | if cmdsuggest: |
|
429 | 428 | msg = _(b'running non-interactively, use %s instead') % cmdsuggest |
|
430 | 429 | else: |
|
431 | 430 | msg = _(b'running non-interactively') |
|
432 | 431 | raise error.Abort(msg) |
|
433 | 432 | |
|
434 | 433 | # make sure username is set before going interactive |
|
435 | 434 | if not opts.get(b'user'): |
|
436 | 435 | ui.username() # raise exception, username not provided |
|
437 | 436 | |
|
438 | 437 | def recordfunc(ui, repo, message, match, opts): |
|
439 | 438 | """This is generic record driver. |
|
440 | 439 | |
|
441 | 440 | Its job is to interactively filter local changes, and |
|
442 | 441 | accordingly prepare working directory into a state in which the |
|
443 | 442 | job can be delegated to a non-interactive commit command such as |
|
444 | 443 | 'commit' or 'qrefresh'. |
|
445 | 444 | |
|
446 | 445 | After the actual job is done by non-interactive command, the |
|
447 | 446 | working directory is restored to its original state. |
|
448 | 447 | |
|
449 | 448 | In the end we'll record interesting changes, and everything else |
|
450 | 449 | will be left in place, so the user can continue working. |
|
451 | 450 | """ |
|
452 | 451 | if not opts.get(b'interactive-unshelve'): |
|
453 | 452 | checkunfinished(repo, commit=True) |
|
454 | 453 | wctx = repo[None] |
|
455 | 454 | merge = len(wctx.parents()) > 1 |
|
456 | 455 | if merge: |
|
457 | 456 | raise error.Abort( |
|
458 | 457 | _( |
|
459 | 458 | b'cannot partially commit a merge ' |
|
460 | 459 | b'(use "hg commit" instead)' |
|
461 | 460 | ) |
|
462 | 461 | ) |
|
463 | 462 | |
|
464 | 463 | def fail(f, msg): |
|
465 | 464 | raise error.Abort(b'%s: %s' % (f, msg)) |
|
466 | 465 | |
|
467 | 466 | force = opts.get(b'force') |
|
468 | 467 | if not force: |
|
469 | 468 | match = matchmod.badmatch(match, fail) |
|
470 | 469 | |
|
471 | 470 | status = repo.status(match=match) |
|
472 | 471 | |
|
473 | 472 | overrides = {(b'ui', b'commitsubrepos'): True} |
|
474 | 473 | |
|
475 | 474 | with repo.ui.configoverride(overrides, b'record'): |
|
476 | 475 | # subrepoutil.precommit() modifies the status |
|
477 | 476 | tmpstatus = scmutil.status( |
|
478 | 477 | copymod.copy(status.modified), |
|
479 | 478 | copymod.copy(status.added), |
|
480 | 479 | copymod.copy(status.removed), |
|
481 | 480 | copymod.copy(status.deleted), |
|
482 | 481 | copymod.copy(status.unknown), |
|
483 | 482 | copymod.copy(status.ignored), |
|
484 | 483 | copymod.copy(status.clean), # pytype: disable=wrong-arg-count |
|
485 | 484 | ) |
|
486 | 485 | |
|
487 | 486 | # Force allows -X subrepo to skip the subrepo. |
|
488 | 487 | subs, commitsubs, newstate = subrepoutil.precommit( |
|
489 | 488 | repo.ui, wctx, tmpstatus, match, force=True |
|
490 | 489 | ) |
|
491 | 490 | for s in subs: |
|
492 | 491 | if s in commitsubs: |
|
493 | 492 | dirtyreason = wctx.sub(s).dirtyreason(True) |
|
494 | 493 | raise error.Abort(dirtyreason) |
|
495 | 494 | |
|
496 | 495 | if not force: |
|
497 | 496 | repo.checkcommitpatterns(wctx, match, status, fail) |
|
498 | 497 | diffopts = patch.difffeatureopts( |
|
499 | 498 | ui, |
|
500 | 499 | opts=opts, |
|
501 | 500 | whitespace=True, |
|
502 | 501 | section=b'commands', |
|
503 | 502 | configprefix=b'commit.interactive.', |
|
504 | 503 | ) |
|
505 | 504 | diffopts.nodates = True |
|
506 | 505 | diffopts.git = True |
|
507 | 506 | diffopts.showfunc = True |
|
508 | 507 | originaldiff = patch.diff(repo, changes=status, opts=diffopts) |
|
509 | 508 | originalchunks = patch.parsepatch(originaldiff) |
|
510 | 509 | match = scmutil.match(repo[None], pats) |
|
511 | 510 | |
|
512 | 511 | # 1. filter patch, since we are intending to apply subset of it |
|
513 | 512 | try: |
|
514 | 513 | chunks, newopts = filterfn(ui, originalchunks, match) |
|
515 | 514 | except error.PatchError as err: |
|
516 | 515 | raise error.Abort(_(b'error parsing patch: %s') % err) |
|
517 | 516 | opts.update(newopts) |
|
518 | 517 | |
|
519 | 518 | # We need to keep a backup of files that have been newly added and |
|
520 | 519 | # modified during the recording process because there is a previous |
|
521 | 520 |         # version without the edit in the workdir. We will also need to restore
|
522 | 521 | # files that were the sources of renames so that the patch application |
|
523 | 522 | # works. |
|
524 | 523 | newlyaddedandmodifiedfiles, alsorestore = newandmodified( |
|
525 | 524 | chunks, originalchunks |
|
526 | 525 | ) |
|
527 | 526 | contenders = set() |
|
528 | 527 | for h in chunks: |
|
529 | 528 | try: |
|
530 | 529 | contenders.update(set(h.files())) |
|
531 | 530 | except AttributeError: |
|
532 | 531 | pass |
|
533 | 532 | |
|
534 | 533 | changed = status.modified + status.added + status.removed |
|
535 | 534 | newfiles = [f for f in changed if f in contenders] |
|
536 | 535 | if not newfiles: |
|
537 | 536 | ui.status(_(b'no changes to record\n')) |
|
538 | 537 | return 0 |
|
539 | 538 | |
|
540 | 539 | modified = set(status.modified) |
|
541 | 540 | |
|
542 | 541 | # 2. backup changed files, so we can restore them in the end |
|
543 | 542 | |
|
544 | 543 | if backupall: |
|
545 | 544 | tobackup = changed |
|
546 | 545 | else: |
|
547 | 546 | tobackup = [ |
|
548 | 547 | f |
|
549 | 548 | for f in newfiles |
|
550 | 549 | if f in modified or f in newlyaddedandmodifiedfiles |
|
551 | 550 | ] |
|
552 | 551 | backups = {} |
|
553 | 552 | if tobackup: |
|
554 | 553 | backupdir = repo.vfs.join(b'record-backups') |
|
555 | 554 | try: |
|
556 | 555 | os.mkdir(backupdir) |
|
557 | 556 | except OSError as err: |
|
558 | 557 | if err.errno != errno.EEXIST: |
|
559 | 558 | raise |
|
560 | 559 | try: |
|
561 | 560 | # backup continues |
|
562 | 561 | for f in tobackup: |
|
563 | 562 | fd, tmpname = pycompat.mkstemp( |
|
564 | 563 | prefix=f.replace(b'/', b'_') + b'.', dir=backupdir |
|
565 | 564 | ) |
|
566 | 565 | os.close(fd) |
|
567 | 566 | ui.debug(b'backup %r as %r\n' % (f, tmpname)) |
|
568 | 567 | util.copyfile(repo.wjoin(f), tmpname, copystat=True) |
|
569 | 568 | backups[f] = tmpname |
|
570 | 569 | |
|
571 | 570 | fp = stringio() |
|
572 | 571 | for c in chunks: |
|
573 | 572 | fname = c.filename() |
|
574 | 573 | if fname in backups: |
|
575 | 574 | c.write(fp) |
|
576 | 575 | dopatch = fp.tell() |
|
577 | 576 | fp.seek(0) |
|
578 | 577 | |
|
579 | 578 | # 2.5 optionally review / modify patch in text editor |
|
580 | 579 | if opts.get(b'review', False): |
|
581 | 580 | patchtext = ( |
|
582 | 581 | crecordmod.diffhelptext |
|
583 | 582 | + crecordmod.patchhelptext |
|
584 | 583 | + fp.read() |
|
585 | 584 | ) |
|
586 | 585 | reviewedpatch = ui.edit( |
|
587 | 586 | patchtext, b"", action=b"diff", repopath=repo.path |
|
588 | 587 | ) |
|
589 | 588 | fp.truncate(0) |
|
590 | 589 | fp.write(reviewedpatch) |
|
591 | 590 | fp.seek(0) |
|
592 | 591 | |
|
593 | 592 | [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles] |
|
594 | 593 | # 3a. apply filtered patch to clean repo (clean) |
|
595 | 594 | if backups: |
|
596 | 595 | m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore) |
|
597 | 596 | mergemod.revert_to(repo[b'.'], matcher=m) |
|
598 | 597 | |
|
599 | 598 | # 3b. (apply) |
|
600 | 599 | if dopatch: |
|
601 | 600 | try: |
|
602 | 601 | ui.debug(b'applying patch\n') |
|
603 | 602 | ui.debug(fp.getvalue()) |
|
604 | 603 | patch.internalpatch(ui, repo, fp, 1, eolmode=None) |
|
605 | 604 | except error.PatchError as err: |
|
606 | 605 | raise error.Abort(pycompat.bytestr(err)) |
|
607 | 606 | del fp |
|
608 | 607 | |
|
609 | 608 | # 4. We prepared working directory according to filtered |
|
610 | 609 | # patch. Now is the time to delegate the job to |
|
611 | 610 | # commit/qrefresh or the like! |
|
612 | 611 | |
|
613 | 612 | # Make all of the pathnames absolute. |
|
614 | 613 | newfiles = [repo.wjoin(nf) for nf in newfiles] |
|
615 | 614 | return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts)) |
|
616 | 615 | finally: |
|
617 | 616 | # 5. finally restore backed-up files |
|
618 | 617 | try: |
|
619 | 618 | dirstate = repo.dirstate |
|
620 | 619 | for realname, tmpname in pycompat.iteritems(backups): |
|
621 | 620 | ui.debug(b'restoring %r to %r\n' % (tmpname, realname)) |
|
622 | 621 | |
|
623 | 622 | if dirstate[realname] == b'n': |
|
624 | 623 | # without normallookup, restoring timestamp |
|
625 | 624 | # may cause partially committed files |
|
626 | 625 | # to be treated as unmodified |
|
627 | 626 | dirstate.normallookup(realname) |
|
628 | 627 | |
|
629 | 628 |                 # copystat=True here and above are a hack to trick any

630 | 629 |                 # editors that have f open into thinking we haven't modified them.

631 | 630 |                 #

632 | 631 |                 # Also note that this is racy as an editor could notice the
|
633 | 632 | # file's mtime before we've finished writing it. |
|
634 | 633 | util.copyfile(tmpname, repo.wjoin(realname), copystat=True) |
|
635 | 634 | os.unlink(tmpname) |
|
636 | 635 | if tobackup: |
|
637 | 636 | os.rmdir(backupdir) |
|
638 | 637 | except OSError: |
|
639 | 638 | pass |
|
640 | 639 | |
|
641 | 640 | def recordinwlock(ui, repo, message, match, opts): |
|
642 | 641 | with repo.wlock(): |
|
643 | 642 | return recordfunc(ui, repo, message, match, opts) |
|
644 | 643 | |
|
645 | 644 | return commit(ui, repo, recordinwlock, pats, opts) |
|
646 | 645 | |
|
647 | 646 | |
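The numbered steps inside recordfunc() above follow a backup/patch/commit/restore shape. A standalone sketch of that shape (hypothetical callables; the real code uses pycompat.mkstemp, mergemod.revert_to and patch.internalpatch, and also fixes up the dirstate afterwards):

import os
import shutil
import tempfile

def record_like(workfile, write_filtered_version, commit):
    fd, backup = tempfile.mkstemp(prefix='record-backup.')
    os.close(fd)
    shutil.copyfile(workfile, backup)        # 2. back up the dirty file
    try:
        write_filtered_version(workfile)     # 3. keep only selected hunks
        return commit(workfile)              # 4. delegate the actual commit
    finally:
        shutil.copyfile(backup, workfile)    # 5. restore the full edits
        os.unlink(backup)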
|
648 | 647 | class dirnode(object): |
|
649 | 648 | """ |
|
650 | 649 |     Represent a directory in the user's working copy with information required for
|
651 | 650 | the purpose of tersing its status. |
|
652 | 651 | |
|
653 | 652 | path is the path to the directory, without a trailing '/' |
|
654 | 653 | |
|
655 | 654 | statuses is a set of statuses of all files in this directory (this includes |
|
656 | 655 | all the files in all the subdirectories too) |
|
657 | 656 | |
|
658 | 657 | files is a list of files which are direct child of this directory |
|
659 | 658 | |
|
660 | 659 |     subdirs is a dictionary with the sub-directory name as the key and its own
|
661 | 660 | dirnode object as the value |
|
662 | 661 | """ |
|
663 | 662 | |
|
664 | 663 | def __init__(self, dirpath): |
|
665 | 664 | self.path = dirpath |
|
666 | 665 | self.statuses = set() |
|
667 | 666 | self.files = [] |
|
668 | 667 | self.subdirs = {} |
|
669 | 668 | |
|
670 | 669 | def _addfileindir(self, filename, status): |
|
671 | 670 | """Add a file in this directory as a direct child.""" |
|
672 | 671 | self.files.append((filename, status)) |
|
673 | 672 | |
|
674 | 673 | def addfile(self, filename, status): |
|
675 | 674 | """ |
|
676 | 675 | Add a file to this directory or to its direct parent directory. |
|
677 | 676 | |
|
678 | 677 |         If the file is not a direct child of this directory, we traverse to the

679 | 678 |         directory of which this file is a direct child and add the file
|
680 | 679 | there. |
|
681 | 680 | """ |
|
682 | 681 | |
|
683 | 682 |         # if the filename contains a path separator, it means it's not a direct
|
684 | 683 | # child of this directory |
|
685 | 684 | if b'/' in filename: |
|
686 | 685 | subdir, filep = filename.split(b'/', 1) |
|
687 | 686 | |
|
688 | 687 |             # does the dirnode object for subdir exist
|
689 | 688 | if subdir not in self.subdirs: |
|
690 | 689 | subdirpath = pathutil.join(self.path, subdir) |
|
691 | 690 | self.subdirs[subdir] = dirnode(subdirpath) |
|
692 | 691 | |
|
693 | 692 | # try adding the file in subdir |
|
694 | 693 | self.subdirs[subdir].addfile(filep, status) |
|
695 | 694 | |
|
696 | 695 | else: |
|
697 | 696 | self._addfileindir(filename, status) |
|
698 | 697 | |
|
699 | 698 | if status not in self.statuses: |
|
700 | 699 | self.statuses.add(status) |
|
701 | 700 | |
|
702 | 701 | def iterfilepaths(self): |
|
703 | 702 | """Yield (status, path) for files directly under this directory.""" |
|
704 | 703 | for f, st in self.files: |
|
705 | 704 | yield st, pathutil.join(self.path, f) |
|
706 | 705 | |
|
707 | 706 | def tersewalk(self, terseargs): |
|
708 | 707 | """ |
|
709 | 708 | Yield (status, path) obtained by processing the status of this |
|
710 | 709 | dirnode. |
|
711 | 710 | |
|
712 | 711 | terseargs is the string of arguments passed by the user with `--terse` |
|
713 | 712 | flag. |
|
714 | 713 | |
|
715 | 714 |         The following cases can happen:
|
716 | 715 | |
|
717 | 716 | 1) All the files in the directory (including all the files in its |
|
718 | 717 | subdirectories) share the same status and the user has asked us to terse |
|
719 | 718 | that status. -> yield (status, dirpath). dirpath will end in '/'. |
|
720 | 719 | |
|
721 | 720 |         2) Otherwise, we do the following:
|
722 | 721 | |
|
723 | 722 | a) Yield (status, filepath) for all the files which are in this |
|
724 | 723 | directory (only the ones in this directory, not the subdirs) |
|
725 | 724 | |
|
726 | 725 | b) Recurse the function on all the subdirectories of this |
|
727 | 726 | directory |
|
728 | 727 | """ |
|
729 | 728 | |
|
730 | 729 | if len(self.statuses) == 1: |
|
731 | 730 | onlyst = self.statuses.pop() |
|
732 | 731 | |
|
733 | 732 | # Making sure we terse only when the status abbreviation is |
|
734 | 733 |             # passed as a terse argument
|
735 | 734 | if onlyst in terseargs: |
|
736 | 735 | yield onlyst, self.path + b'/' |
|
737 | 736 | return |
|
738 | 737 | |
|
739 | 738 | # add the files to status list |
|
740 | 739 | for st, fpath in self.iterfilepaths(): |
|
741 | 740 | yield st, fpath |
|
742 | 741 | |
|
743 | 742 | # recurse on the subdirs |
|
744 | 743 | for dirobj in self.subdirs.values(): |
|
745 | 744 | for st, fpath in dirobj.tersewalk(terseargs): |
|
746 | 745 | yield st, fpath |
|
747 | 746 | |
|
748 | 747 | |
|
749 | 748 | def tersedir(statuslist, terseargs): |
|
750 | 749 | """ |
|
751 | 750 |     Terse the status if all the files in a directory share the same status.
|
752 | 751 | |
|
753 | 752 |     statuslist is a scmutil.status() object which contains a list of files for

754 | 753 |     each status.

755 | 754 |     terseargs is the string passed by the user as the argument to the `--terse`
|
756 | 755 | flag. |
|
757 | 756 | |
|
758 | 757 | The function makes a tree of objects of dirnode class, and at each node it |
|
759 | 758 | stores the information required to know whether we can terse a certain |
|
760 | 759 | directory or not. |
|
761 | 760 | """ |
|
762 | 761 |     # the order matters here as it is used to produce the final list
|
763 | 762 | allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c') |
|
764 | 763 | |
|
765 | 764 | # checking the argument validity |
|
766 | 765 | for s in pycompat.bytestr(terseargs): |
|
767 | 766 | if s not in allst: |
|
768 | 767 | raise error.Abort(_(b"'%s' not recognized") % s) |
|
769 | 768 | |
|
770 | 769 | # creating a dirnode object for the root of the repo |
|
771 | 770 | rootobj = dirnode(b'') |
|
772 | 771 | pstatus = ( |
|
773 | 772 | b'modified', |
|
774 | 773 | b'added', |
|
775 | 774 | b'deleted', |
|
776 | 775 | b'clean', |
|
777 | 776 | b'unknown', |
|
778 | 777 | b'ignored', |
|
779 | 778 | b'removed', |
|
780 | 779 | ) |
|
781 | 780 | |
|
782 | 781 | tersedict = {} |
|
783 | 782 | for attrname in pstatus: |
|
784 | 783 | statuschar = attrname[0:1] |
|
785 | 784 | for f in getattr(statuslist, attrname): |
|
786 | 785 | rootobj.addfile(f, statuschar) |
|
787 | 786 | tersedict[statuschar] = [] |
|
788 | 787 | |
|
789 | 788 | # we won't be tersing the root dir, so add files in it |
|
790 | 789 | for st, fpath in rootobj.iterfilepaths(): |
|
791 | 790 | tersedict[st].append(fpath) |
|
792 | 791 | |
|
793 | 792 | # process each sub-directory and build tersedict |
|
794 | 793 | for subdir in rootobj.subdirs.values(): |
|
795 | 794 | for st, f in subdir.tersewalk(terseargs): |
|
796 | 795 | tersedict[st].append(f) |
|
797 | 796 | |
|
798 | 797 | tersedlist = [] |
|
799 | 798 | for st in allst: |
|
800 | 799 | tersedict[st].sort() |
|
801 | 800 | tersedlist.append(tersedict[st]) |
|
802 | 801 | |
|
803 | 802 | return scmutil.status(*tersedlist) |
|
804 | 803 | |
|
805 | 804 | |
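A usage sketch of the structures above, with invented paths: when every file under a directory carries the same status and that status letter appears in the --terse argument, the whole subtree collapses to a single entry.

root = dirnode(b'')
root.addfile(b'docs/a.txt', b'a')
root.addfile(b'docs/b.txt', b'a')
# all statuses under docs/ agree, so tersewalk() yields one entry:
assert list(root.subdirs[b'docs'].tersewalk(b'a')) == [(b'a', b'docs/')]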
|
806 | 805 | def _commentlines(raw): |
|
807 | 806 |     '''Surround lines with a comment char and a new line'''
|
808 | 807 | lines = raw.splitlines() |
|
809 | 808 | commentedlines = [b'# %s' % line for line in lines] |
|
810 | 809 | return b'\n'.join(commentedlines) + b'\n' |
|
811 | 810 | |
|
812 | 811 | |
|
813 | 812 | @attr.s(frozen=True) |
|
814 | 813 | class morestatus(object): |
|
815 | 814 | reporoot = attr.ib() |
|
816 | 815 | unfinishedop = attr.ib() |
|
817 | 816 | unfinishedmsg = attr.ib() |
|
818 | 817 | activemerge = attr.ib() |
|
819 | 818 | unresolvedpaths = attr.ib() |
|
820 | 819 | _formattedpaths = attr.ib(init=False, default=set()) |
|
821 | 820 | _label = b'status.morestatus' |
|
822 | 821 | |
|
823 | 822 | def formatfile(self, path, fm): |
|
824 | 823 | self._formattedpaths.add(path) |
|
825 | 824 | if self.activemerge and path in self.unresolvedpaths: |
|
826 | 825 | fm.data(unresolved=True) |
|
827 | 826 | |
|
828 | 827 | def formatfooter(self, fm): |
|
829 | 828 | if self.unfinishedop or self.unfinishedmsg: |
|
830 | 829 | fm.startitem() |
|
831 | 830 | fm.data(itemtype=b'morestatus') |
|
832 | 831 | |
|
833 | 832 | if self.unfinishedop: |
|
834 | 833 | fm.data(unfinished=self.unfinishedop) |
|
835 | 834 | statemsg = ( |
|
836 | 835 | _(b'The repository is in an unfinished *%s* state.') |
|
837 | 836 | % self.unfinishedop |
|
838 | 837 | ) |
|
839 | 838 | fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label) |
|
840 | 839 | if self.unfinishedmsg: |
|
841 | 840 | fm.data(unfinishedmsg=self.unfinishedmsg) |
|
842 | 841 | |
|
843 | 842 | # May also start new data items. |
|
844 | 843 | self._formatconflicts(fm) |
|
845 | 844 | |
|
846 | 845 | if self.unfinishedmsg: |
|
847 | 846 | fm.plain( |
|
848 | 847 | b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label |
|
849 | 848 | ) |
|
850 | 849 | |
|
851 | 850 | def _formatconflicts(self, fm): |
|
852 | 851 | if not self.activemerge: |
|
853 | 852 | return |
|
854 | 853 | |
|
855 | 854 | if self.unresolvedpaths: |
|
856 | 855 | mergeliststr = b'\n'.join( |
|
857 | 856 | [ |
|
858 | 857 | b' %s' |
|
859 | 858 | % util.pathto(self.reporoot, encoding.getcwd(), path) |
|
860 | 859 | for path in self.unresolvedpaths |
|
861 | 860 | ] |
|
862 | 861 | ) |
|
863 | 862 | msg = ( |
|
864 | 863 | _( |
|
865 | 864 | '''Unresolved merge conflicts: |
|
866 | 865 | |
|
867 | 866 | %s |
|
868 | 867 | |
|
869 | 868 | To mark files as resolved: hg resolve --mark FILE''' |
|
870 | 869 | ) |
|
871 | 870 | % mergeliststr |
|
872 | 871 | ) |
|
873 | 872 | |
|
874 | 873 | # If any paths with unresolved conflicts were not previously |
|
875 | 874 | # formatted, output them now. |
|
876 | 875 | for f in self.unresolvedpaths: |
|
877 | 876 | if f in self._formattedpaths: |
|
878 | 877 | # Already output. |
|
879 | 878 | continue |
|
880 | 879 | fm.startitem() |
|
881 | 880 | # We can't claim to know the status of the file - it may just |
|
882 | 881 | # have been in one of the states that were not requested for |
|
883 | 882 | # display, so it could be anything. |
|
884 | 883 | fm.data(itemtype=b'file', path=f, unresolved=True) |
|
885 | 884 | |
|
886 | 885 | else: |
|
887 | 886 | msg = _(b'No unresolved merge conflicts.') |
|
888 | 887 | |
|
889 | 888 | fm.plain(b'%s\n' % _commentlines(msg), label=self._label) |
|
890 | 889 | |
|
891 | 890 | |
|
892 | 891 | def readmorestatus(repo): |
|
893 | 892 | """Returns a morestatus object if the repo has unfinished state.""" |
|
894 | 893 | statetuple = statemod.getrepostate(repo) |
|
895 | 894 | mergestate = mergestatemod.mergestate.read(repo) |
|
896 | 895 | activemerge = mergestate.active() |
|
897 | 896 | if not statetuple and not activemerge: |
|
898 | 897 | return None |
|
899 | 898 | |
|
900 | 899 | unfinishedop = unfinishedmsg = unresolved = None |
|
901 | 900 | if statetuple: |
|
902 | 901 | unfinishedop, unfinishedmsg = statetuple |
|
903 | 902 | if activemerge: |
|
904 | 903 | unresolved = sorted(mergestate.unresolved()) |
|
905 | 904 | return morestatus( |
|
906 | 905 | repo.root, unfinishedop, unfinishedmsg, activemerge, unresolved |
|
907 | 906 | ) |
|
908 | 907 | |
|
909 | 908 | |
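A hedged sketch of the call pattern readmorestatus() is designed for (the formatter fm, the repo, and the list of printed files are invented here):

ms = readmorestatus(repo)       # None unless unfinished state or a merge
if ms:
    for f in files_printed:     # flag unresolved files as they are shown
        ms.formatfile(f, fm)
    ms.formatfooter(fm)         # state banner plus leftover conflicts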
|
910 | 909 | def findpossible(cmd, table, strict=False): |
|
911 | 910 | """ |
|
912 | 911 | Return cmd -> (aliases, command table entry) |
|
913 | 912 | for each matching command. |
|
914 | 913 | Return debug commands (or their aliases) only if no normal command matches. |
|
915 | 914 | """ |
|
916 | 915 | choice = {} |
|
917 | 916 | debugchoice = {} |
|
918 | 917 | |
|
919 | 918 | if cmd in table: |
|
920 | 919 | # short-circuit exact matches, "log" alias beats "log|history" |
|
921 | 920 | keys = [cmd] |
|
922 | 921 | else: |
|
923 | 922 | keys = table.keys() |
|
924 | 923 | |
|
925 | 924 | allcmds = [] |
|
926 | 925 | for e in keys: |
|
927 | 926 | aliases = parsealiases(e) |
|
928 | 927 | allcmds.extend(aliases) |
|
929 | 928 | found = None |
|
930 | 929 | if cmd in aliases: |
|
931 | 930 | found = cmd |
|
932 | 931 | elif not strict: |
|
933 | 932 | for a in aliases: |
|
934 | 933 | if a.startswith(cmd): |
|
935 | 934 | found = a |
|
936 | 935 | break |
|
937 | 936 | if found is not None: |
|
938 | 937 | if aliases[0].startswith(b"debug") or found.startswith(b"debug"): |
|
939 | 938 | debugchoice[found] = (aliases, table[e]) |
|
940 | 939 | else: |
|
941 | 940 | choice[found] = (aliases, table[e]) |
|
942 | 941 | |
|
943 | 942 | if not choice and debugchoice: |
|
944 | 943 | choice = debugchoice |
|
945 | 944 | |
|
946 | 945 | return choice, allcmds |
|
947 | 946 | |
|
948 | 947 | |
|
949 | 948 | def findcmd(cmd, table, strict=True): |
|
950 | 949 | """Return (aliases, command table entry) for command string.""" |
|
951 | 950 | choice, allcmds = findpossible(cmd, table, strict) |
|
952 | 951 | |
|
953 | 952 | if cmd in choice: |
|
954 | 953 | return choice[cmd] |
|
955 | 954 | |
|
956 | 955 | if len(choice) > 1: |
|
957 | 956 | clist = sorted(choice) |
|
958 | 957 | raise error.AmbiguousCommand(cmd, clist) |
|
959 | 958 | |
|
960 | 959 | if choice: |
|
961 | 960 | return list(choice.values())[0] |
|
962 | 961 | |
|
963 | 962 | raise error.UnknownCommand(cmd, allcmds) |
|
964 | 963 | |
|
965 | 964 | |
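A sketch of the matching rules above with a toy command table (entries invented; real values are command table tuples):

table = {b'status|st': 1, b'strip': 2, b'debugfoo': 3}
assert findcmd(b'st', table) == ([b'status', b'st'], 1)  # exact alias wins
# findcmd(b'st', table, strict=False) still resolves, because the exact
# alias b'st' beats the b'strip' prefix match; a prefix like b's' that
# matches several commands raises error.AmbiguousCommand, and debug
# commands are offered only when no normal command matches.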
|
966 | 965 | def changebranch(ui, repo, revs, label, opts): |
|
967 | 966 | """ Change the branch name of given revs to label """ |
|
968 | 967 | |
|
969 | 968 | with repo.wlock(), repo.lock(), repo.transaction(b'branches'): |
|
970 | 969 | # abort in case of uncommitted merge or dirty wdir |
|
971 | 970 | bailifchanged(repo) |
|
972 | 971 | revs = scmutil.revrange(repo, revs) |
|
973 | 972 | if not revs: |
|
974 | 973 | raise error.Abort(b"empty revision set") |
|
975 | 974 | roots = repo.revs(b'roots(%ld)', revs) |
|
976 | 975 | if len(roots) > 1: |
|
977 | 976 | raise error.Abort( |
|
978 | 977 | _(b"cannot change branch of non-linear revisions") |
|
979 | 978 | ) |
|
980 | 979 | rewriteutil.precheck(repo, revs, b'change branch of') |
|
981 | 980 | |
|
982 | 981 | root = repo[roots.first()] |
|
983 | 982 | rpb = {parent.branch() for parent in root.parents()} |
|
984 | 983 | if ( |
|
985 | 984 | not opts.get(b'force') |
|
986 | 985 | and label not in rpb |
|
987 | 986 | and label in repo.branchmap() |
|
988 | 987 | ): |
|
989 | 988 | raise error.Abort(_(b"a branch of the same name already exists")) |
|
990 | 989 | |
|
991 | 990 | if repo.revs(b'obsolete() and %ld', revs): |
|
992 | 991 | raise error.Abort( |
|
993 | 992 | _(b"cannot change branch of a obsolete changeset") |
|
994 | 993 | ) |
|
995 | 994 | |
|
996 | 995 | # make sure only topological heads |
|
997 | 996 | if repo.revs(b'heads(%ld) - head()', revs): |
|
998 | 997 | raise error.Abort(_(b"cannot change branch in middle of a stack")) |
|
999 | 998 | |
|
1000 | 999 | replacements = {} |
|
1001 | 1000 | # avoid import cycle mercurial.cmdutil -> mercurial.context -> |
|
1002 | 1001 | # mercurial.subrepo -> mercurial.cmdutil |
|
1003 | 1002 | from . import context |
|
1004 | 1003 | |
|
1005 | 1004 | for rev in revs: |
|
1006 | 1005 | ctx = repo[rev] |
|
1007 | 1006 | oldbranch = ctx.branch() |
|
1008 | 1007 | # check if ctx has same branch |
|
1009 | 1008 | if oldbranch == label: |
|
1010 | 1009 | continue |
|
1011 | 1010 | |
|
1012 | 1011 | def filectxfn(repo, newctx, path): |
|
1013 | 1012 | try: |
|
1014 | 1013 | return ctx[path] |
|
1015 | 1014 | except error.ManifestLookupError: |
|
1016 | 1015 | return None |
|
1017 | 1016 | |
|
1018 | 1017 | ui.debug( |
|
1019 | 1018 | b"changing branch of '%s' from '%s' to '%s'\n" |
|
1020 | 1019 | % (hex(ctx.node()), oldbranch, label) |
|
1021 | 1020 | ) |
|
1022 | 1021 | extra = ctx.extra() |
|
1023 | 1022 | extra[b'branch_change'] = hex(ctx.node()) |
|
1024 | 1023 |             # While changing the branch of a set of linear commits, make sure that

1025 | 1024 |             # we base our commits on the new parent rather than the old parent which
|
1026 | 1025 | # was obsoleted while changing the branch |
|
1027 | 1026 | p1 = ctx.p1().node() |
|
1028 | 1027 | p2 = ctx.p2().node() |
|
1029 | 1028 | if p1 in replacements: |
|
1030 | 1029 | p1 = replacements[p1][0] |
|
1031 | 1030 | if p2 in replacements: |
|
1032 | 1031 | p2 = replacements[p2][0] |
|
1033 | 1032 | |
|
1034 | 1033 | mc = context.memctx( |
|
1035 | 1034 | repo, |
|
1036 | 1035 | (p1, p2), |
|
1037 | 1036 | ctx.description(), |
|
1038 | 1037 | ctx.files(), |
|
1039 | 1038 | filectxfn, |
|
1040 | 1039 | user=ctx.user(), |
|
1041 | 1040 | date=ctx.date(), |
|
1042 | 1041 | extra=extra, |
|
1043 | 1042 | branch=label, |
|
1044 | 1043 | ) |
|
1045 | 1044 | |
|
1046 | 1045 | newnode = repo.commitctx(mc) |
|
1047 | 1046 | replacements[ctx.node()] = (newnode,) |
|
1048 | 1047 | ui.debug(b'new node id is %s\n' % hex(newnode)) |
|
1049 | 1048 | |
|
1050 | 1049 | # create obsmarkers and move bookmarks |
|
1051 | 1050 | scmutil.cleanupnodes( |
|
1052 | 1051 | repo, replacements, b'branch-change', fixphase=True |
|
1053 | 1052 | ) |
|
1054 | 1053 | |
|
1055 | 1054 | # move the working copy too |
|
1056 | 1055 | wctx = repo[None] |
|
1057 | 1056 | # in-progress merge is a bit too complex for now. |
|
1058 | 1057 | if len(wctx.parents()) == 1: |
|
1059 | 1058 | newid = replacements.get(wctx.p1().node()) |
|
1060 | 1059 | if newid is not None: |
|
1061 | 1060 | # avoid import cycle mercurial.cmdutil -> mercurial.hg -> |
|
1062 | 1061 | # mercurial.cmdutil |
|
1063 | 1062 | from . import hg |
|
1064 | 1063 | |
|
1065 | 1064 | hg.update(repo, newid[0], quietempty=True) |
|
1066 | 1065 | |
|
1067 | 1066 | ui.status(_(b"changed branch on %d changesets\n") % len(replacements)) |
|
1068 | 1067 | |
|
1069 | 1068 | |
|
1070 | 1069 | def findrepo(p): |
|
1071 | 1070 | while not os.path.isdir(os.path.join(p, b".hg")): |
|
1072 | 1071 | oldp, p = p, os.path.dirname(p) |
|
1073 | 1072 | if p == oldp: |
|
1074 | 1073 | return None |
|
1075 | 1074 | |
|
1076 | 1075 | return p |
|
1077 | 1076 | |
|
1078 | 1077 | |
|
1079 | 1078 | def bailifchanged(repo, merge=True, hint=None): |
|
1080 | 1079 | """ enforce the precondition that working directory must be clean. |
|
1081 | 1080 | |
|
1082 | 1081 | 'merge' can be set to false if a pending uncommitted merge should be |
|
1083 | 1082 | ignored (such as when 'update --check' runs). |
|
1084 | 1083 | |
|
1085 | 1084 | 'hint' is the usual hint given to Abort exception. |
|
1086 | 1085 | """ |
|
1087 | 1086 | |
|
1088 | 1087 | if merge and repo.dirstate.p2() != nullid: |
|
1089 | 1088 | raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint) |
|
1090 | 1089 | st = repo.status() |
|
1091 | 1090 | if st.modified or st.added or st.removed or st.deleted: |
|
1092 | 1091 | raise error.Abort(_(b'uncommitted changes'), hint=hint) |
|
1093 | 1092 | ctx = repo[None] |
|
1094 | 1093 | for s in sorted(ctx.substate): |
|
1095 | 1094 | ctx.sub(s).bailifchanged(hint=hint) |
|
1096 | 1095 | |
|
1097 | 1096 | |
|
1098 | 1097 | def logmessage(ui, opts): |
|
1099 | 1098 | """ get the log message according to -m and -l option """ |
|
1100 | 1099 | |
|
1101 | 1100 | check_at_most_one_arg(opts, b'message', b'logfile') |
|
1102 | 1101 | |
|
1103 | 1102 | message = opts.get(b'message') |
|
1104 | 1103 | logfile = opts.get(b'logfile') |
|
1105 | 1104 | |
|
1106 | 1105 | if not message and logfile: |
|
1107 | 1106 | try: |
|
1108 | 1107 | if isstdiofilename(logfile): |
|
1109 | 1108 | message = ui.fin.read() |
|
1110 | 1109 | else: |
|
1111 | 1110 | message = b'\n'.join(util.readfile(logfile).splitlines()) |
|
1112 | 1111 | except IOError as inst: |
|
1113 | 1112 | raise error.Abort( |
|
1114 | 1113 | _(b"can't read commit message '%s': %s") |
|
1115 | 1114 | % (logfile, encoding.strtolocal(inst.strerror)) |
|
1116 | 1115 | ) |
|
1117 | 1116 | return message |
|
1118 | 1117 | |
|
1119 | 1118 | |
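A hedged summary of the -m/-l resolution above, as a comment table (file names invented):

# opts = {b'message': b'fix bug'}            -> b'fix bug'
# opts = {b'logfile': b'-'}                  -> read from ui.fin (stdin)
# opts = {b'logfile': b'msg.txt'}            -> file contents, re-joined
#                                               with b'\n'
# opts = {b'message': ..., b'logfile': ...}  -> error.Abort from the
#                                               exclusivity check above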
|
1120 | 1119 | def mergeeditform(ctxorbool, baseformname): |
|
1121 | 1120 | """return appropriate editform name (referencing a committemplate) |
|
1122 | 1121 | |
|
1123 | 1122 | 'ctxorbool' is either a ctx to be committed, or a bool indicating whether |
|
1124 | 1123 |     a merge is being committed.
|
1125 | 1124 | |
|
1126 | 1125 | This returns baseformname with '.merge' appended if it is a merge, |
|
1127 | 1126 | otherwise '.normal' is appended. |
|
1128 | 1127 | """ |
|
1129 | 1128 | if isinstance(ctxorbool, bool): |
|
1130 | 1129 | if ctxorbool: |
|
1131 | 1130 | return baseformname + b".merge" |
|
1132 | 1131 | elif len(ctxorbool.parents()) > 1: |
|
1133 | 1132 | return baseformname + b".merge" |
|
1134 | 1133 | |
|
1135 | 1134 | return baseformname + b".normal" |
|
1136 | 1135 | |
|
1137 | 1136 | |
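Concrete instances of the name derivation above (the base editform here is invented):

assert mergeeditform(True, b'commit.amend') == b'commit.amend.merge'
assert mergeeditform(False, b'commit.amend') == b'commit.amend.normal'
# with a ctx instead of a bool, len(ctx.parents()) > 1 picks '.merge'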
|
1138 | 1137 | def getcommiteditor( |
|
1139 | 1138 | edit=False, finishdesc=None, extramsg=None, editform=b'', **opts |
|
1140 | 1139 | ): |
|
1141 | 1140 | """get appropriate commit message editor according to '--edit' option |
|
1142 | 1141 | |
|
1143 | 1142 | 'finishdesc' is a function to be called with edited commit message |
|
1144 | 1143 | (= 'description' of the new changeset) just after editing, but |
|
1145 | 1144 |     before checking emptiness. It should return the actual text to be

1146 | 1145 |     stored into history. This allows changing the description before

1147 | 1146 |     storing.
|
1148 | 1147 | |
|
1149 | 1148 |     'extramsg' is an extra message to be shown in the editor instead of

1150 | 1149 |     the 'Leave message empty to abort commit' line. The 'HG: ' prefix and EOL

1151 | 1150 |     are automatically added.
|
1152 | 1151 | |
|
1153 | 1152 | 'editform' is a dot-separated list of names, to distinguish |
|
1154 | 1153 | the purpose of commit text editing. |
|
1155 | 1154 | |
|
1156 | 1155 | 'getcommiteditor' returns 'commitforceeditor' regardless of |
|
1157 | 1156 | 'edit', if one of 'finishdesc' or 'extramsg' is specified, because |
|
1158 | 1157 |     they are specific to usage in MQ.
|
1159 | 1158 | """ |
|
1160 | 1159 | if edit or finishdesc or extramsg: |
|
1161 | 1160 | return lambda r, c, s: commitforceeditor( |
|
1162 | 1161 | r, c, s, finishdesc=finishdesc, extramsg=extramsg, editform=editform |
|
1163 | 1162 | ) |
|
1164 | 1163 | elif editform: |
|
1165 | 1164 | return lambda r, c, s: commiteditor(r, c, s, editform=editform) |
|
1166 | 1165 | else: |
|
1167 | 1166 | return commiteditor |
|
1168 | 1167 | |
|
1169 | 1168 | |
|
1170 | 1169 | def _escapecommandtemplate(tmpl): |
|
1171 | 1170 | parts = [] |
|
1172 | 1171 | for typ, start, end in templater.scantemplate(tmpl, raw=True): |
|
1173 | 1172 | if typ == b'string': |
|
1174 | 1173 | parts.append(stringutil.escapestr(tmpl[start:end])) |
|
1175 | 1174 | else: |
|
1176 | 1175 | parts.append(tmpl[start:end]) |
|
1177 | 1176 | return b''.join(parts) |
|
1178 | 1177 | |
|
1179 | 1178 | |
|
1180 | 1179 | def rendercommandtemplate(ui, tmpl, props): |
|
1181 | 1180 | r"""Expand a literal template 'tmpl' in a way suitable for command line |
|
1182 | 1181 | |
|
1183 | 1182 | '\' in outermost string is not taken as an escape character because it |
|
1184 | 1183 | is a directory separator on Windows. |
|
1185 | 1184 | |
|
1186 | 1185 | >>> from . import ui as uimod |
|
1187 | 1186 | >>> ui = uimod.ui() |
|
1188 | 1187 | >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'}) |
|
1189 | 1188 | 'c:\\foo' |
|
1190 | 1189 | >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'}) |
|
1191 | 1190 | 'c:{path}' |
|
1192 | 1191 | """ |
|
1193 | 1192 | if not tmpl: |
|
1194 | 1193 | return tmpl |
|
1195 | 1194 | t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl)) |
|
1196 | 1195 | return t.renderdefault(props) |
|
1197 | 1196 | |
|
1198 | 1197 | |
|
1199 | 1198 | def rendertemplate(ctx, tmpl, props=None): |
|
1200 | 1199 | """Expand a literal template 'tmpl' byte-string against one changeset |
|
1201 | 1200 | |
|
1202 | 1201 | Each props item must be a stringify-able value or a callable returning |
|
1203 | 1202 |     such value, i.e. no bare list or dict should be passed.
|
1204 | 1203 | """ |
|
1205 | 1204 | repo = ctx.repo() |
|
1206 | 1205 | tres = formatter.templateresources(repo.ui, repo) |
|
1207 | 1206 | t = formatter.maketemplater( |
|
1208 | 1207 | repo.ui, tmpl, defaults=templatekw.keywords, resources=tres |
|
1209 | 1208 | ) |
|
1210 | 1209 | mapping = {b'ctx': ctx} |
|
1211 | 1210 | if props: |
|
1212 | 1211 | mapping.update(props) |
|
1213 | 1212 | return t.renderdefault(mapping) |
|
1214 | 1213 | |
|
1215 | 1214 | |
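A hedged usage sketch (the template string and prop are invented): given some changectx ctx, this expands one literal template against that changeset.

# rendertemplate(ctx, b'{rev}:{node|short} {desc|firstline}\n')
# rendertemplate(ctx, b'{foo}', {b'foo': b'bar'})  # extra props must be
#     stringify-able scalars or callables, per the docstring above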
|
1216 | 1215 | def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None): |
|
1217 | 1216 | r"""Convert old-style filename format string to template string |
|
1218 | 1217 | |
|
1219 | 1218 | >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0) |
|
1220 | 1219 | 'foo-{reporoot|basename}-{seqno}.patch' |
|
1221 | 1220 | >>> _buildfntemplate(b'%R{tags % "{tag}"}%H') |
|
1222 | 1221 | '{rev}{tags % "{tag}"}{node}' |
|
1223 | 1222 | |
|
1224 | 1223 | '\' in outermost strings has to be escaped because it is a directory |
|
1225 | 1224 | separator on Windows: |
|
1226 | 1225 | |
|
1227 | 1226 | >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0) |
|
1228 | 1227 | 'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch' |
|
1229 | 1228 | >>> _buildfntemplate(b'\\\\foo\\bar.patch') |
|
1230 | 1229 | '\\\\\\\\foo\\\\bar.patch' |
|
1231 | 1230 | >>> _buildfntemplate(b'\\{tags % "{tag}"}') |
|
1232 | 1231 | '\\\\{tags % "{tag}"}' |
|
1233 | 1232 | |
|
1234 | 1233 | but inner strings follow the template rules (i.e. '\' is taken as an |
|
1235 | 1234 | escape character): |
|
1236 | 1235 | |
|
1237 | 1236 | >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0) |
|
1238 | 1237 | '{"c:\\tmp"}' |
|
1239 | 1238 | """ |
|
1240 | 1239 | expander = { |
|
1241 | 1240 | b'H': b'{node}', |
|
1242 | 1241 | b'R': b'{rev}', |
|
1243 | 1242 | b'h': b'{node|short}', |
|
1244 | 1243 | b'm': br'{sub(r"[^\w]", "_", desc|firstline)}', |
|
1245 | 1244 | b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}', |
|
1246 | 1245 | b'%': b'%', |
|
1247 | 1246 | b'b': b'{reporoot|basename}', |
|
1248 | 1247 | } |
|
1249 | 1248 | if total is not None: |
|
1250 | 1249 | expander[b'N'] = b'{total}' |
|
1251 | 1250 | if seqno is not None: |
|
1252 | 1251 | expander[b'n'] = b'{seqno}' |
|
1253 | 1252 | if total is not None and seqno is not None: |
|
1254 | 1253 | expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}' |
|
1255 | 1254 | if pathname is not None: |
|
1256 | 1255 | expander[b's'] = b'{pathname|basename}' |
|
1257 | 1256 | expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}' |
|
1258 | 1257 | expander[b'p'] = b'{pathname}' |
|
1259 | 1258 | |
|
1260 | 1259 | newname = [] |
|
1261 | 1260 | for typ, start, end in templater.scantemplate(pat, raw=True): |
|
1262 | 1261 | if typ != b'string': |
|
1263 | 1262 | newname.append(pat[start:end]) |
|
1264 | 1263 | continue |
|
1265 | 1264 | i = start |
|
1266 | 1265 | while i < end: |
|
1267 | 1266 | n = pat.find(b'%', i, end) |
|
1268 | 1267 | if n < 0: |
|
1269 | 1268 | newname.append(stringutil.escapestr(pat[i:end])) |
|
1270 | 1269 | break |
|
1271 | 1270 | newname.append(stringutil.escapestr(pat[i:n])) |
|
1272 | 1271 | if n + 2 > end: |
|
1273 | 1272 | raise error.Abort( |
|
1274 | 1273 | _(b"incomplete format spec in output filename") |
|
1275 | 1274 | ) |
|
1276 | 1275 | c = pat[n + 1 : n + 2] |
|
1277 | 1276 | i = n + 2 |
|
1278 | 1277 | try: |
|
1279 | 1278 | newname.append(expander[c]) |
|
1280 | 1279 | except KeyError: |
|
1281 | 1280 | raise error.Abort( |
|
1282 | 1281 | _(b"invalid format spec '%%%s' in output filename") % c |
|
1283 | 1282 | ) |
|
1284 | 1283 | return b''.join(newname) |
|
1285 | 1284 | |
|
1286 | 1285 | |
|
1287 | 1286 | def makefilename(ctx, pat, **props): |
|
1288 | 1287 | if not pat: |
|
1289 | 1288 | return pat |
|
1290 | 1289 | tmpl = _buildfntemplate(pat, **props) |
|
1291 | 1290 | # BUG: alias expansion shouldn't be made against template fragments |
|
1292 | 1291 | # rewritten from %-format strings, but we have no easy way to partially |
|
1293 | 1292 | # disable the expansion. |
|
1294 | 1293 | return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props)) |
|
1295 | 1294 | |
|
1296 | 1295 | |
|
1297 | 1296 | def isstdiofilename(pat): |
|
1298 | 1297 | """True if the given pat looks like a filename denoting stdin/stdout""" |
|
1299 | 1298 | return not pat or pat == b'-' |
|
1300 | 1299 | |
|
1301 | 1300 | |
|
1302 | 1301 | class _unclosablefile(object): |
|
1303 | 1302 | def __init__(self, fp): |
|
1304 | 1303 | self._fp = fp |
|
1305 | 1304 | |
|
1306 | 1305 | def close(self): |
|
1307 | 1306 | pass |
|
1308 | 1307 | |
|
1309 | 1308 | def __iter__(self): |
|
1310 | 1309 | return iter(self._fp) |
|
1311 | 1310 | |
|
1312 | 1311 | def __getattr__(self, attr): |
|
1313 | 1312 | return getattr(self._fp, attr) |
|
1314 | 1313 | |
|
1315 | 1314 | def __enter__(self): |
|
1316 | 1315 | return self |
|
1317 | 1316 | |
|
1318 | 1317 | def __exit__(self, exc_type, exc_value, exc_tb): |
|
1319 | 1318 | pass |
|
1320 | 1319 | |
|
1321 | 1320 | |
|
1322 | 1321 | def makefileobj(ctx, pat, mode=b'wb', **props): |
|
1323 | 1322 | writable = mode not in (b'r', b'rb') |
|
1324 | 1323 | |
|
1325 | 1324 | if isstdiofilename(pat): |
|
1326 | 1325 | repo = ctx.repo() |
|
1327 | 1326 | if writable: |
|
1328 | 1327 | fp = repo.ui.fout |
|
1329 | 1328 | else: |
|
1330 | 1329 | fp = repo.ui.fin |
|
1331 | 1330 | return _unclosablefile(fp) |
|
1332 | 1331 | fn = makefilename(ctx, pat, **props) |
|
1333 | 1332 | return open(fn, mode) |
|
1334 | 1333 | |
|
1335 | 1334 | |
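Why the _unclosablefile wrapper exists, sketched under the assumption that callers use a with-statement and that ctx is some changectx: for the stdio pattern ('-'), makefileobj() hands back ui.fout/ui.fin, which must survive the caller's close.

with makefileobj(ctx, b'-') as fp:          # fp wraps ui.fout
    fp.write(b'...')                        # close()/__exit__ are no-ops
with makefileobj(ctx, b'%h.patch') as fp:   # ordinary file object
    fp.write(b'...')                        # really closed on exit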
|
1336 | 1335 | def openstorage(repo, cmd, file_, opts, returnrevlog=False): |
|
1337 | 1336 | """opens the changelog, manifest, a filelog or a given revlog""" |
|
1338 | 1337 | cl = opts[b'changelog'] |
|
1339 | 1338 | mf = opts[b'manifest'] |
|
1340 | 1339 | dir = opts[b'dir'] |
|
1341 | 1340 | msg = None |
|
1342 | 1341 | if cl and mf: |
|
1343 | 1342 | msg = _(b'cannot specify --changelog and --manifest at the same time') |
|
1344 | 1343 | elif cl and dir: |
|
1345 | 1344 | msg = _(b'cannot specify --changelog and --dir at the same time') |
|
1346 | 1345 | elif cl or mf or dir: |
|
1347 | 1346 | if file_: |
|
1348 | 1347 | msg = _(b'cannot specify filename with --changelog or --manifest') |
|
1349 | 1348 | elif not repo: |
|
1350 | 1349 | msg = _( |
|
1351 | 1350 | b'cannot specify --changelog or --manifest or --dir ' |
|
1352 | 1351 | b'without a repository' |
|
1353 | 1352 | ) |
|
1354 | 1353 | if msg: |
|
1355 | 1354 | raise error.Abort(msg) |
|
1356 | 1355 | |
|
1357 | 1356 | r = None |
|
1358 | 1357 | if repo: |
|
1359 | 1358 | if cl: |
|
1360 | 1359 | r = repo.unfiltered().changelog |
|
1361 | 1360 | elif dir: |
|
1362 | if requirements.TREEMANIFEST_REQUIREMENT not in repo.requirements: | |
|
1361 | if not scmutil.istreemanifest(repo): | |
|
1363 | 1362 | raise error.Abort( |
|
1364 | 1363 | _( |
|
1365 | 1364 | b"--dir can only be used on repos with " |
|
1366 | 1365 | b"treemanifest enabled" |
|
1367 | 1366 | ) |
|
1368 | 1367 | ) |
|
1369 | 1368 | if not dir.endswith(b'/'): |
|
1370 | 1369 | dir = dir + b'/' |
|
1371 | 1370 | dirlog = repo.manifestlog.getstorage(dir) |
|
1372 | 1371 | if len(dirlog): |
|
1373 | 1372 | r = dirlog |
|
1374 | 1373 | elif mf: |
|
1375 | 1374 | r = repo.manifestlog.getstorage(b'') |
|
1376 | 1375 | elif file_: |
|
1377 | 1376 | filelog = repo.file(file_) |
|
1378 | 1377 | if len(filelog): |
|
1379 | 1378 | r = filelog |
|
1380 | 1379 | |
|
1381 | 1380 | # Not all storage may be revlogs. If requested, try to return an actual |
|
1382 | 1381 | # revlog instance. |
|
1383 | 1382 | if returnrevlog: |
|
1384 | 1383 | if isinstance(r, revlog.revlog): |
|
1385 | 1384 | pass |
|
1386 | 1385 | elif util.safehasattr(r, b'_revlog'): |
|
1387 | 1386 | r = r._revlog # pytype: disable=attribute-error |
|
1388 | 1387 | elif r is not None: |
|
1389 | 1388 | raise error.Abort(_(b'%r does not appear to be a revlog') % r) |
|
1390 | 1389 | |
|
1391 | 1390 | if not r: |
|
1392 | 1391 | if not returnrevlog: |
|
1393 | 1392 | raise error.Abort(_(b'cannot give path to non-revlog')) |
|
1394 | 1393 | |
|
1395 | 1394 | if not file_: |
|
1396 | 1395 | raise error.CommandError(cmd, _(b'invalid arguments')) |
|
1397 | 1396 | if not os.path.isfile(file_): |
|
1398 | 1397 | raise error.Abort(_(b"revlog '%s' not found") % file_) |
|
1399 | 1398 | r = revlog.revlog( |
|
1400 | 1399 | vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i" |
|
1401 | 1400 | ) |
|
1402 | 1401 | return r |
|
1403 | 1402 | |
|
1404 | 1403 | |
|
1405 | 1404 | def openrevlog(repo, cmd, file_, opts): |
|
1406 | 1405 | """Obtain a revlog backing storage of an item. |
|
1407 | 1406 | |
|
1408 | 1407 | This is similar to ``openstorage()`` except it always returns a revlog. |
|
1409 | 1408 | |
|
1410 | 1409 | In most cases, a caller cares about the main storage object - not the |
|
1411 | 1410 | revlog backing it. Therefore, this function should only be used by code |
|
1412 | 1411 |     that needs to examine low-level revlog implementation details, e.g. debug
|
1413 | 1412 | commands. |
|
1414 | 1413 | """ |
|
1415 | 1414 | return openstorage(repo, cmd, file_, opts, returnrevlog=True) |
|
1416 | 1415 | |
|
1417 | 1416 | |
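A hedged sketch of how a debug command might call this, given a repo object (flag values mirror debugrevlogopts earlier in this file; the command name is invented):

opts = {b'changelog': False, b'manifest': True, b'dir': b''}
r = openrevlog(repo, b'debugindex', None, opts)  # the manifest revlog
# exactly one of --changelog/--manifest/--dir/FILE may select the storage;
# conflicting combinations abort with the messages constructed above.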
|
1418 | 1417 | def copy(ui, repo, pats, opts, rename=False): |
|
1419 | 1418 | check_incompatible_arguments(opts, b'forget', [b'dry_run']) |
|
1420 | 1419 | |
|
1421 | 1420 | # called with the repo lock held |
|
1422 | 1421 | # |
|
1423 | 1422 | # hgsep => pathname that uses "/" to separate directories |
|
1424 | 1423 | # ossep => pathname that uses os.sep to separate directories |
|
1425 | 1424 | cwd = repo.getcwd() |
|
1426 | 1425 | targets = {} |
|
1427 | 1426 | forget = opts.get(b"forget") |
|
1428 | 1427 | after = opts.get(b"after") |
|
1429 | 1428 | dryrun = opts.get(b"dry_run") |
|
1430 | 1429 | rev = opts.get(b'at_rev') |
|
1431 | 1430 | if rev: |
|
1432 | 1431 | if not forget and not after: |
|
1433 | 1432 | # TODO: Remove this restriction and make it also create the copy |
|
1434 | 1433 | # targets (and remove the rename source if rename==True). |
|
1435 | 1434 | raise error.Abort(_(b'--at-rev requires --after')) |
|
1436 | 1435 | ctx = scmutil.revsingle(repo, rev) |
|
1437 | 1436 | if len(ctx.parents()) > 1: |
|
1438 | 1437 | raise error.Abort(_(b'cannot mark/unmark copy in merge commit')) |
|
1439 | 1438 | else: |
|
1440 | 1439 | ctx = repo[None] |
|
1441 | 1440 | |
|
1442 | 1441 | pctx = ctx.p1() |
|
1443 | 1442 | |
|
1444 | 1443 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) |
|
1445 | 1444 | |
|
1446 | 1445 | if forget: |
|
1447 | 1446 | if ctx.rev() is None: |
|
1448 | 1447 | new_ctx = ctx |
|
1449 | 1448 | else: |
|
1450 | 1449 | if len(ctx.parents()) > 1: |
|
1451 | 1450 | raise error.Abort(_(b'cannot unmark copy in merge commit')) |
|
1452 | 1451 | # avoid cycle context -> subrepo -> cmdutil |
|
1453 | 1452 | from . import context |
|
1454 | 1453 | |
|
1455 | 1454 | rewriteutil.precheck(repo, [ctx.rev()], b'uncopy') |
|
1456 | 1455 | new_ctx = context.overlayworkingctx(repo) |
|
1457 | 1456 | new_ctx.setbase(ctx.p1()) |
|
1458 | 1457 | mergemod.graft(repo, ctx, wctx=new_ctx) |
|
1459 | 1458 | |
|
1460 | 1459 | match = scmutil.match(ctx, pats, opts) |
|
1461 | 1460 | |
|
1462 | 1461 | current_copies = ctx.p1copies() |
|
1463 | 1462 | current_copies.update(ctx.p2copies()) |
|
1464 | 1463 | |
|
1465 | 1464 | uipathfn = scmutil.getuipathfn(repo) |
|
1466 | 1465 | for f in ctx.walk(match): |
|
1467 | 1466 | if f in current_copies: |
|
1468 | 1467 | new_ctx[f].markcopied(None) |
|
1469 | 1468 | elif match.exact(f): |
|
1470 | 1469 | ui.warn( |
|
1471 | 1470 | _( |
|
1472 | 1471 | b'%s: not unmarking as copy - file is not marked as copied\n' |
|
1473 | 1472 | ) |
|
1474 | 1473 | % uipathfn(f) |
|
1475 | 1474 | ) |
|
1476 | 1475 | |
|
1477 | 1476 | if ctx.rev() is not None: |
|
1478 | 1477 | with repo.lock(): |
|
1479 | 1478 | mem_ctx = new_ctx.tomemctx_for_amend(ctx) |
|
1480 | 1479 | new_node = mem_ctx.commit() |
|
1481 | 1480 | |
|
1482 | 1481 | if repo.dirstate.p1() == ctx.node(): |
|
1483 | 1482 | with repo.dirstate.parentchange(): |
|
1484 | 1483 | scmutil.movedirstate(repo, repo[new_node]) |
|
1485 | 1484 | replacements = {ctx.node(): [new_node]} |
|
1486 | 1485 | scmutil.cleanupnodes( |
|
1487 | 1486 | repo, replacements, b'uncopy', fixphase=True |
|
1488 | 1487 | ) |
|
1489 | 1488 | |
|
1490 | 1489 | return |
|
1491 | 1490 | |
|
1492 | 1491 | pats = scmutil.expandpats(pats) |
|
1493 | 1492 | if not pats: |
|
1494 | 1493 | raise error.Abort(_(b'no source or destination specified')) |
|
1495 | 1494 | if len(pats) == 1: |
|
1496 | 1495 | raise error.Abort(_(b'no destination specified')) |
|
1497 | 1496 | dest = pats.pop() |
|
1498 | 1497 | |
|
1499 | 1498 | def walkpat(pat): |
|
1500 | 1499 | srcs = [] |
|
1501 | 1500 | # TODO: Inline and simplify the non-working-copy version of this code |
|
1502 | 1501 | # since it shares very little with the working-copy version of it. |
|
1503 | 1502 | ctx_to_walk = ctx if ctx.rev() is None else pctx |
|
1504 | 1503 | m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True) |
|
1505 | 1504 | for abs in ctx_to_walk.walk(m): |
|
1506 | 1505 | rel = uipathfn(abs) |
|
1507 | 1506 | exact = m.exact(abs) |
|
1508 | 1507 | if abs not in ctx: |
|
1509 | 1508 | if abs in pctx: |
|
1510 | 1509 | if not after: |
|
1511 | 1510 | if exact: |
|
1512 | 1511 | ui.warn( |
|
1513 | 1512 | _( |
|
1514 | 1513 | b'%s: not copying - file has been marked ' |
|
1515 | 1514 | b'for remove\n' |
|
1516 | 1515 | ) |
|
1517 | 1516 | % rel |
|
1518 | 1517 | ) |
|
1519 | 1518 | continue |
|
1520 | 1519 | else: |
|
1521 | 1520 | if exact: |
|
1522 | 1521 | ui.warn( |
|
1523 | 1522 | _(b'%s: not copying - file is not managed\n') % rel |
|
1524 | 1523 | ) |
|
1525 | 1524 | continue |
|
1526 | 1525 | |
|
1527 | 1526 | # abs: hgsep |
|
1528 | 1527 | # rel: ossep |
|
1529 | 1528 | srcs.append((abs, rel, exact)) |
|
1530 | 1529 | return srcs |
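# (Illustrative note, not in the original: 'hgsep' paths are repo-relative
# and always use b'/', while 'ossep' paths use the platform separator,
# pycompat.ossep, e.g. b'\\' on Windows. So each tuple above is roughly
# (b'dir/file', uipathfn(b'dir/file'), match.exact(b'dir/file')).)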
|
1531 | 1530 | |
|
1532 | 1531 | if ctx.rev() is not None: |
|
1533 | 1532 | rewriteutil.precheck(repo, [ctx.rev()], b'uncopy') |
|
1534 | 1533 | absdest = pathutil.canonpath(repo.root, cwd, dest) |
|
1535 | 1534 | if ctx.hasdir(absdest): |
|
1536 | 1535 | raise error.Abort( |
|
1537 | 1536 | _(b'%s: --at-rev does not support a directory as destination') |
|
1538 | 1537 | % uipathfn(absdest) |
|
1539 | 1538 | ) |
|
1540 | 1539 | if absdest not in ctx: |
|
1541 | 1540 | raise error.Abort( |
|
1542 | 1541 | _(b'%s: copy destination does not exist in %s') |
|
1543 | 1542 | % (uipathfn(absdest), ctx) |
|
1544 | 1543 | ) |
|
1545 | 1544 | |
|
1546 | 1545 | # avoid cycle context -> subrepo -> cmdutil |
|
1547 | 1546 | from . import context |
|
1548 | 1547 | |
|
1549 | 1548 | copylist = [] |
|
1550 | 1549 | for pat in pats: |
|
1551 | 1550 | srcs = walkpat(pat) |
|
1552 | 1551 | if not srcs: |
|
1553 | 1552 | continue |
|
1554 | 1553 | for abs, rel, exact in srcs: |
|
1555 | 1554 | copylist.append(abs) |
|
1556 | 1555 | |
|
1557 | 1556 | if not copylist: |
|
1558 | 1557 | raise error.Abort(_(b'no files to copy')) |
|
1559 | 1558 | # TODO: Add support for `hg cp --at-rev . foo bar dir` and |
|
1560 | 1559 | # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the |
|
1561 | 1560 | # existing functions below. |
|
1562 | 1561 | if len(copylist) != 1: |
|
1563 | 1562 | raise error.Abort(_(b'--at-rev requires a single source')) |
|
1564 | 1563 | |
|
1565 | 1564 | new_ctx = context.overlayworkingctx(repo) |
|
1566 | 1565 | new_ctx.setbase(ctx.p1()) |
|
1567 | 1566 | mergemod.graft(repo, ctx, wctx=new_ctx) |
|
1568 | 1567 | |
|
1569 | 1568 | new_ctx.markcopied(absdest, copylist[0]) |
|
1570 | 1569 | |
|
1571 | 1570 | with repo.lock(): |
|
1572 | 1571 | mem_ctx = new_ctx.tomemctx_for_amend(ctx) |
|
1573 | 1572 | new_node = mem_ctx.commit() |
|
1574 | 1573 | |
|
1575 | 1574 | if repo.dirstate.p1() == ctx.node(): |
|
1576 | 1575 | with repo.dirstate.parentchange(): |
|
1577 | 1576 | scmutil.movedirstate(repo, repo[new_node]) |
|
1578 | 1577 | replacements = {ctx.node(): [new_node]} |
|
1579 | 1578 | scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True) |
|
1580 | 1579 | |
|
1581 | 1580 | return |
|
1582 | 1581 | |
|
1583 | 1582 | # abssrc: hgsep |
|
1584 | 1583 | # relsrc: ossep |
|
1585 | 1584 | # otarget: ossep |
|
1586 | 1585 | def copyfile(abssrc, relsrc, otarget, exact): |
|
1587 | 1586 | abstarget = pathutil.canonpath(repo.root, cwd, otarget) |
|
1588 | 1587 | if b'/' in abstarget: |
|
1589 | 1588 | # We cannot normalize abstarget itself, as this would prevent

1590 | 1589 | # case-only renames, like a => A.
|
1591 | 1590 | abspath, absname = abstarget.rsplit(b'/', 1) |
|
1592 | 1591 | abstarget = repo.dirstate.normalize(abspath) + b'/' + absname |
|
1593 | 1592 | reltarget = repo.pathto(abstarget, cwd) |
|
1594 | 1593 | target = repo.wjoin(abstarget) |
|
1595 | 1594 | src = repo.wjoin(abssrc) |
|
1596 | 1595 | state = repo.dirstate[abstarget] |
|
1597 | 1596 | |
|
1598 | 1597 | scmutil.checkportable(ui, abstarget) |
|
1599 | 1598 | |
|
1600 | 1599 | # check for collisions |
|
1601 | 1600 | prevsrc = targets.get(abstarget) |
|
1602 | 1601 | if prevsrc is not None: |
|
1603 | 1602 | ui.warn( |
|
1604 | 1603 | _(b'%s: not overwriting - %s collides with %s\n') |
|
1605 | 1604 | % ( |
|
1606 | 1605 | reltarget, |
|
1607 | 1606 | repo.pathto(abssrc, cwd), |
|
1608 | 1607 | repo.pathto(prevsrc, cwd), |
|
1609 | 1608 | ) |
|
1610 | 1609 | ) |
|
1611 | 1610 | return True # report a failure |
|
1612 | 1611 | |
|
1613 | 1612 | # check for overwrites |
|
1614 | 1613 | exists = os.path.lexists(target) |
|
1615 | 1614 | samefile = False |
|
1616 | 1615 | if exists and abssrc != abstarget: |
|
1617 | 1616 | if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize( |
|
1618 | 1617 | abstarget |
|
1619 | 1618 | ): |
|
1620 | 1619 | if not rename: |
|
1621 | 1620 | ui.warn(_(b"%s: can't copy - same file\n") % reltarget) |
|
1622 | 1621 | return True # report a failure |
|
1623 | 1622 | exists = False |
|
1624 | 1623 | samefile = True |
|
1625 | 1624 | |
|
1626 | 1625 | if (not after and exists) or (after and state in b'mn'):
|
1627 | 1626 | if not opts[b'force']: |
|
1628 | 1627 | if state in b'mn': |
|
1629 | 1628 | msg = _(b'%s: not overwriting - file already committed\n') |
|
1630 | 1629 | if after: |
|
1631 | 1630 | flags = b'--after --force' |
|
1632 | 1631 | else: |
|
1633 | 1632 | flags = b'--force' |
|
1634 | 1633 | if rename: |
|
1635 | 1634 | hint = ( |
|
1636 | 1635 | _( |
|
1637 | 1636 | b"('hg rename %s' to replace the file by " |
|
1638 | 1637 | b'recording a rename)\n' |
|
1639 | 1638 | ) |
|
1640 | 1639 | % flags |
|
1641 | 1640 | ) |
|
1642 | 1641 | else: |
|
1643 | 1642 | hint = ( |
|
1644 | 1643 | _( |
|
1645 | 1644 | b"('hg copy %s' to replace the file by " |
|
1646 | 1645 | b'recording a copy)\n' |
|
1647 | 1646 | ) |
|
1648 | 1647 | % flags |
|
1649 | 1648 | ) |
|
1650 | 1649 | else: |
|
1651 | 1650 | msg = _(b'%s: not overwriting - file exists\n') |
|
1652 | 1651 | if rename: |
|
1653 | 1652 | hint = _( |
|
1654 | 1653 | b"('hg rename --after' to record the rename)\n" |
|
1655 | 1654 | ) |
|
1656 | 1655 | else: |
|
1657 | 1656 | hint = _(b"('hg copy --after' to record the copy)\n") |
|
1658 | 1657 | ui.warn(msg % reltarget) |
|
1659 | 1658 | ui.warn(hint) |
|
1660 | 1659 | return True # report a failure |
|
1661 | 1660 | |
|
1662 | 1661 | if after: |
|
1663 | 1662 | if not exists: |
|
1664 | 1663 | if rename: |
|
1665 | 1664 | ui.warn( |
|
1666 | 1665 | _(b'%s: not recording move - %s does not exist\n') |
|
1667 | 1666 | % (relsrc, reltarget) |
|
1668 | 1667 | ) |
|
1669 | 1668 | else: |
|
1670 | 1669 | ui.warn( |
|
1671 | 1670 | _(b'%s: not recording copy - %s does not exist\n') |
|
1672 | 1671 | % (relsrc, reltarget) |
|
1673 | 1672 | ) |
|
1674 | 1673 | return True # report a failure |
|
1675 | 1674 | elif not dryrun: |
|
1676 | 1675 | try: |
|
1677 | 1676 | if exists: |
|
1678 | 1677 | os.unlink(target) |
|
1679 | 1678 | targetdir = os.path.dirname(target) or b'.' |
|
1680 | 1679 | if not os.path.isdir(targetdir): |
|
1681 | 1680 | os.makedirs(targetdir) |
|
1682 | 1681 | if samefile: |
|
1683 | 1682 | tmp = target + b"~hgrename" |
|
1684 | 1683 | os.rename(src, tmp) |
|
1685 | 1684 | os.rename(tmp, target) |
|
1686 | 1685 | else: |
|
1687 | 1686 | # Preserve stat info on renames, not on copies; this matches |
|
1688 | 1687 | # Linux CLI behavior. |
|
1689 | 1688 | util.copyfile(src, target, copystat=rename) |
|
1690 | 1689 | srcexists = True |
|
1691 | 1690 | except IOError as inst: |
|
1692 | 1691 | if inst.errno == errno.ENOENT: |
|
1693 | 1692 | ui.warn(_(b'%s: deleted in working directory\n') % relsrc) |
|
1694 | 1693 | srcexists = False |
|
1695 | 1694 | else: |
|
1696 | 1695 | ui.warn( |
|
1697 | 1696 | _(b'%s: cannot copy - %s\n') |
|
1698 | 1697 | % (relsrc, encoding.strtolocal(inst.strerror)) |
|
1699 | 1698 | ) |
|
1700 | 1699 | return True # report a failure |
|
1701 | 1700 | |
|
1702 | 1701 | if ui.verbose or not exact: |
|
1703 | 1702 | if rename: |
|
1704 | 1703 | ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget)) |
|
1705 | 1704 | else: |
|
1706 | 1705 | ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget)) |
|
1707 | 1706 | |
|
1708 | 1707 | targets[abstarget] = abssrc |
|
1709 | 1708 | |
|
1710 | 1709 | # fix up dirstate |
|
1711 | 1710 | scmutil.dirstatecopy( |
|
1712 | 1711 | ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd |
|
1713 | 1712 | ) |
|
1714 | 1713 | if rename and not dryrun: |
|
1715 | 1714 | if not after and srcexists and not samefile: |
|
1716 | 1715 | rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs') |
|
1717 | 1716 | repo.wvfs.unlinkpath(abssrc, rmdir=rmdir) |
|
1718 | 1717 | ctx.forget([abssrc]) |
|
1719 | 1718 | |
|
1720 | 1719 | # pat: ossep |
|
1721 | 1720 | # dest: ossep
|
1722 | 1721 | # srcs: list of (hgsep, hgsep, ossep, bool) |
|
1723 | 1722 | # return: function that takes hgsep and returns ossep |
|
1724 | 1723 | def targetpathfn(pat, dest, srcs): |
|
1725 | 1724 | if os.path.isdir(pat): |
|
1726 | 1725 | abspfx = pathutil.canonpath(repo.root, cwd, pat) |
|
1727 | 1726 | abspfx = util.localpath(abspfx) |
|
1728 | 1727 | if destdirexists: |
|
1729 | 1728 | striplen = len(os.path.split(abspfx)[0]) |
|
1730 | 1729 | else: |
|
1731 | 1730 | striplen = len(abspfx) |
|
1732 | 1731 | if striplen: |
|
1733 | 1732 | striplen += len(pycompat.ossep) |
|
1734 | 1733 | res = lambda p: os.path.join(dest, util.localpath(p)[striplen:]) |
|
1735 | 1734 | elif destdirexists: |
|
1736 | 1735 | res = lambda p: os.path.join( |
|
1737 | 1736 | dest, os.path.basename(util.localpath(p)) |
|
1738 | 1737 | ) |
|
1739 | 1738 | else: |
|
1740 | 1739 | res = lambda p: dest |
|
1741 | 1740 | return res |
|
1742 | 1741 | |
|
1743 | 1742 | # pat: ossep |
|
1744 | 1743 | # dest: ossep
|
1745 | 1744 | # srcs: list of (hgsep, hgsep, ossep, bool) |
|
1746 | 1745 | # return: function that takes hgsep and returns ossep |
|
1747 | 1746 | def targetpathafterfn(pat, dest, srcs): |
|
1748 | 1747 | if matchmod.patkind(pat): |
|
1749 | 1748 | # a mercurial pattern |
|
1750 | 1749 | res = lambda p: os.path.join( |
|
1751 | 1750 | dest, os.path.basename(util.localpath(p)) |
|
1752 | 1751 | ) |
|
1753 | 1752 | else: |
|
1754 | 1753 | abspfx = pathutil.canonpath(repo.root, cwd, pat) |
|
1755 | 1754 | if len(abspfx) < len(srcs[0][0]): |
|
1756 | 1755 | # A directory. Either the target path contains the last |
|
1757 | 1756 | # component of the source path or it does not. |
|
1758 | 1757 | def evalpath(striplen): |
|
1759 | 1758 | score = 0 |
|
1760 | 1759 | for s in srcs: |
|
1761 | 1760 | t = os.path.join(dest, util.localpath(s[0])[striplen:]) |
|
1762 | 1761 | if os.path.lexists(t): |
|
1763 | 1762 | score += 1 |
|
1764 | 1763 | return score |
|
1765 | 1764 | |
|
1766 | 1765 | abspfx = util.localpath(abspfx) |
|
1767 | 1766 | striplen = len(abspfx) |
|
1768 | 1767 | if striplen: |
|
1769 | 1768 | striplen += len(pycompat.ossep) |
|
1770 | 1769 | if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])): |
|
1771 | 1770 | score = evalpath(striplen) |
|
1772 | 1771 | striplen1 = len(os.path.split(abspfx)[0]) |
|
1773 | 1772 | if striplen1: |
|
1774 | 1773 | striplen1 += len(pycompat.ossep) |
|
1775 | 1774 | if evalpath(striplen1) > score: |
|
1776 | 1775 | striplen = striplen1 |
|
1777 | 1776 | res = lambda p: os.path.join(dest, util.localpath(p)[striplen:]) |
|
1778 | 1777 | else: |
|
1779 | 1778 | # a file |
|
1780 | 1779 | if destdirexists: |
|
1781 | 1780 | res = lambda p: os.path.join( |
|
1782 | 1781 | dest, os.path.basename(util.localpath(p)) |
|
1783 | 1782 | ) |
|
1784 | 1783 | else: |
|
1785 | 1784 | res = lambda p: dest |
|
1786 | 1785 | return res |
|
1787 | 1786 | |
|
1788 | 1787 | destdirexists = os.path.isdir(dest) and not os.path.islink(dest) |
|
1789 | 1788 | if not destdirexists: |
|
1790 | 1789 | if len(pats) > 1 or matchmod.patkind(pats[0]): |
|
1791 | 1790 | raise error.Abort( |
|
1792 | 1791 | _( |
|
1793 | 1792 | b'with multiple sources, destination must be an ' |
|
1794 | 1793 | b'existing directory' |
|
1795 | 1794 | ) |
|
1796 | 1795 | ) |
|
1797 | 1796 | if util.endswithsep(dest): |
|
1798 | 1797 | raise error.Abort(_(b'destination %s is not a directory') % dest) |
|
1799 | 1798 | |
|
1800 | 1799 | tfn = targetpathfn |
|
1801 | 1800 | if after: |
|
1802 | 1801 | tfn = targetpathafterfn |
|
1803 | 1802 | copylist = [] |
|
1804 | 1803 | for pat in pats: |
|
1805 | 1804 | srcs = walkpat(pat) |
|
1806 | 1805 | if not srcs: |
|
1807 | 1806 | continue |
|
1808 | 1807 | copylist.append((tfn(pat, dest, srcs), srcs)) |
|
1809 | 1808 | if not copylist: |
|
1810 | 1809 | raise error.Abort(_(b'no files to copy')) |
|
1811 | 1810 | |
|
1812 | 1811 | errors = 0 |
|
1813 | 1812 | for targetpath, srcs in copylist: |
|
1814 | 1813 | for abssrc, relsrc, exact in srcs: |
|
1815 | 1814 | if copyfile(abssrc, relsrc, targetpath(abssrc), exact): |
|
1816 | 1815 | errors += 1 |
|
1817 | 1816 | |
|
1818 | 1817 | return errors != 0 |
|
1819 | 1818 | |
|
1820 | 1819 | |
|
1821 | 1820 | ## facility to let extensions process additional data into an import patch

1822 | 1821 | # list of identifiers to be executed in order
|
1823 | 1822 | extrapreimport = [] # run before commit |
|
1824 | 1823 | extrapostimport = [] # run after commit |
|
1825 | 1824 | # mapping from identifier to actual import function |
|
1826 | 1825 | # |
|
1827 | 1826 | # 'preimport' functions are run before the commit is made and are provided

1828 | 1827 | # the following arguments:
|
1829 | 1828 | # - repo: the localrepository instance, |
|
1830 | 1829 | # - patchdata: data extracted from patch header (cf m.patch.patchheadermap), |
|
1831 | 1830 | # - extra: the future extra dictionary of the changeset, please mutate it, |
|
1832 | 1831 | # - opts: the import options. |
|
1833 | 1832 | # XXX ideally, we would just pass a ctx ready to be computed, which would

1834 | 1833 | # allow mutation of the in-memory commit and more. Feel free to rework the

1835 | 1834 | # code to get there.
|
1836 | 1835 | extrapreimportmap = {} |
|
1837 | 1836 | # 'postimport' functions are run after the commit is made and are provided

1838 | 1837 | # the following argument:
|
1839 | 1838 | # - ctx: the changectx created by import. |
|
1840 | 1839 | extrapostimportmap = {} |
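# A minimal sketch (not part of this module) of how an extension could use
# the 'preimport' hooks above; the b'recordsource' identifier, the helper
# name and the b'source' extra key are all hypothetical:
#
#   from mercurial import cmdutil
#
#   def _recordsource(repo, patchdata, extra, opts):
#       # runs before the commit is created; mutate 'extra' in place
#       extra[b'source'] = patchdata.get(b'filename', b'<unknown>')
#
#   cmdutil.extrapreimport.append(b'recordsource')
#   cmdutil.extrapreimportmap[b'recordsource'] = _recordsource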
|
1841 | 1840 | |
|
1842 | 1841 | |
|
1843 | 1842 | def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc): |
|
1844 | 1843 | """Utility function used by commands.import to import a single patch |
|
1845 | 1844 | |
|
1846 | 1845 | This function is explicitly defined here to help the evolve extension

1847 | 1846 | wrap this part of the import logic.
|
1848 | 1847 | |
|
1849 | 1848 | The API is currently a bit ugly because it is a simple code translation from
|
1850 | 1849 | the import command. Feel free to make it better. |
|
1851 | 1850 | |
|
1852 | 1851 | :patchdata: a dictionary containing parsed patch data (such as from |
|
1853 | 1852 | ``patch.extract()``) |
|
1854 | 1853 | :parents: nodes that will be parent of the created commit |
|
1855 | 1854 | :opts: the full dict of options passed to the import command
|
1856 | 1855 | :msgs: list to save commit message to. |
|
1857 | 1856 | (used in case we need to save it when failing) |
|
1858 | 1857 | :updatefunc: a function that updates a repo to a given node
|
1859 | 1858 | updatefunc(<repo>, <node>) |
|
1860 | 1859 | """ |
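# A minimal sketch of an 'updatefunc' matching the signature documented
# above, assuming the mercurial.hg module (commands.import passes
# something similar; this is illustrative only):
#
#   from mercurial import hg
#
#   def updatefunc(repo, node):
#       hg.update(repo, node)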
|
1861 | 1860 | # avoid cycle context -> subrepo -> cmdutil |
|
1862 | 1861 | from . import context |
|
1863 | 1862 | |
|
1864 | 1863 | tmpname = patchdata.get(b'filename') |
|
1865 | 1864 | message = patchdata.get(b'message') |
|
1866 | 1865 | user = opts.get(b'user') or patchdata.get(b'user') |
|
1867 | 1866 | date = opts.get(b'date') or patchdata.get(b'date') |
|
1868 | 1867 | branch = patchdata.get(b'branch') |
|
1869 | 1868 | nodeid = patchdata.get(b'nodeid') |
|
1870 | 1869 | p1 = patchdata.get(b'p1') |
|
1871 | 1870 | p2 = patchdata.get(b'p2') |
|
1872 | 1871 | |
|
1873 | 1872 | nocommit = opts.get(b'no_commit') |
|
1874 | 1873 | importbranch = opts.get(b'import_branch') |
|
1875 | 1874 | update = not opts.get(b'bypass') |
|
1876 | 1875 | strip = opts[b"strip"] |
|
1877 | 1876 | prefix = opts[b"prefix"] |
|
1878 | 1877 | sim = float(opts.get(b'similarity') or 0) |
|
1879 | 1878 | |
|
1880 | 1879 | if not tmpname: |
|
1881 | 1880 | return None, None, False |
|
1882 | 1881 | |
|
1883 | 1882 | rejects = False |
|
1884 | 1883 | |
|
1885 | 1884 | cmdline_message = logmessage(ui, opts) |
|
1886 | 1885 | if cmdline_message: |
|
1887 | 1886 | # pick up the cmdline msg
|
1888 | 1887 | message = cmdline_message |
|
1889 | 1888 | elif message: |
|
1890 | 1889 | # pick up the patch msg
|
1891 | 1890 | message = message.strip() |
|
1892 | 1891 | else: |
|
1893 | 1892 | # launch the editor |
|
1894 | 1893 | message = None |
|
1895 | 1894 | ui.debug(b'message:\n%s\n' % (message or b'')) |
|
1896 | 1895 | |
|
1897 | 1896 | if len(parents) == 1: |
|
1898 | 1897 | parents.append(repo[nullid]) |
|
1899 | 1898 | if opts.get(b'exact'): |
|
1900 | 1899 | if not nodeid or not p1: |
|
1901 | 1900 | raise error.Abort(_(b'not a Mercurial patch')) |
|
1902 | 1901 | p1 = repo[p1] |
|
1903 | 1902 | p2 = repo[p2 or nullid] |
|
1904 | 1903 | elif p2: |
|
1905 | 1904 | try: |
|
1906 | 1905 | p1 = repo[p1] |
|
1907 | 1906 | p2 = repo[p2] |
|
1908 | 1907 | # Without any options, consider p2 only if the |
|
1909 | 1908 | # patch is being applied on top of the recorded |
|
1910 | 1909 | # first parent. |
|
1911 | 1910 | if p1 != parents[0]: |
|
1912 | 1911 | p1 = parents[0] |
|
1913 | 1912 | p2 = repo[nullid] |
|
1914 | 1913 | except error.RepoError: |
|
1915 | 1914 | p1, p2 = parents |
|
1916 | 1915 | if p2.node() == nullid: |
|
1917 | 1916 | ui.warn( |
|
1918 | 1917 | _( |
|
1919 | 1918 | b"warning: import the patch as a normal revision\n" |
|
1920 | 1919 | b"(use --exact to import the patch as a merge)\n" |
|
1921 | 1920 | ) |
|
1922 | 1921 | ) |
|
1923 | 1922 | else: |
|
1924 | 1923 | p1, p2 = parents |
|
1925 | 1924 | |
|
1926 | 1925 | n = None |
|
1927 | 1926 | if update: |
|
1928 | 1927 | if p1 != parents[0]: |
|
1929 | 1928 | updatefunc(repo, p1.node()) |
|
1930 | 1929 | if p2 != parents[1]: |
|
1931 | 1930 | repo.setparents(p1.node(), p2.node()) |
|
1932 | 1931 | |
|
1933 | 1932 | if opts.get(b'exact') or importbranch: |
|
1934 | 1933 | repo.dirstate.setbranch(branch or b'default') |
|
1935 | 1934 | |
|
1936 | 1935 | partial = opts.get(b'partial', False) |
|
1937 | 1936 | files = set() |
|
1938 | 1937 | try: |
|
1939 | 1938 | patch.patch( |
|
1940 | 1939 | ui, |
|
1941 | 1940 | repo, |
|
1942 | 1941 | tmpname, |
|
1943 | 1942 | strip=strip, |
|
1944 | 1943 | prefix=prefix, |
|
1945 | 1944 | files=files, |
|
1946 | 1945 | eolmode=None, |
|
1947 | 1946 | similarity=sim / 100.0, |
|
1948 | 1947 | ) |
|
1949 | 1948 | except error.PatchError as e: |
|
1950 | 1949 | if not partial: |
|
1951 | 1950 | raise error.Abort(pycompat.bytestr(e)) |
|
1952 | 1951 | if partial: |
|
1953 | 1952 | rejects = True |
|
1954 | 1953 | |
|
1955 | 1954 | files = list(files) |
|
1956 | 1955 | if nocommit: |
|
1957 | 1956 | if message: |
|
1958 | 1957 | msgs.append(message) |
|
1959 | 1958 | else: |
|
1960 | 1959 | if opts.get(b'exact') or p2: |
|
1961 | 1960 | # If you got here, you either used --force and know what

1962 | 1961 | # you are doing, or you used --exact or a merge patch while

1963 | 1962 | # being updated to its first parent.
|
1964 | 1963 | m = None |
|
1965 | 1964 | else: |
|
1966 | 1965 | m = scmutil.matchfiles(repo, files or []) |
|
1967 | 1966 | editform = mergeeditform(repo[None], b'import.normal') |
|
1968 | 1967 | if opts.get(b'exact'): |
|
1969 | 1968 | editor = None |
|
1970 | 1969 | else: |
|
1971 | 1970 | editor = getcommiteditor( |
|
1972 | 1971 | editform=editform, **pycompat.strkwargs(opts) |
|
1973 | 1972 | ) |
|
1974 | 1973 | extra = {} |
|
1975 | 1974 | for idfunc in extrapreimport: |
|
1976 | 1975 | extrapreimportmap[idfunc](repo, patchdata, extra, opts) |
|
1977 | 1976 | overrides = {} |
|
1978 | 1977 | if partial: |
|
1979 | 1978 | overrides[(b'ui', b'allowemptycommit')] = True |
|
1980 | 1979 | if opts.get(b'secret'): |
|
1981 | 1980 | overrides[(b'phases', b'new-commit')] = b'secret' |
|
1982 | 1981 | with repo.ui.configoverride(overrides, b'import'): |
|
1983 | 1982 | n = repo.commit( |
|
1984 | 1983 | message, user, date, match=m, editor=editor, extra=extra |
|
1985 | 1984 | ) |
|
1986 | 1985 | for idfunc in extrapostimport: |
|
1987 | 1986 | extrapostimportmap[idfunc](repo[n]) |
|
1988 | 1987 | else: |
|
1989 | 1988 | if opts.get(b'exact') or importbranch: |
|
1990 | 1989 | branch = branch or b'default' |
|
1991 | 1990 | else: |
|
1992 | 1991 | branch = p1.branch() |
|
1993 | 1992 | store = patch.filestore() |
|
1994 | 1993 | try: |
|
1995 | 1994 | files = set() |
|
1996 | 1995 | try: |
|
1997 | 1996 | patch.patchrepo( |
|
1998 | 1997 | ui, |
|
1999 | 1998 | repo, |
|
2000 | 1999 | p1, |
|
2001 | 2000 | store, |
|
2002 | 2001 | tmpname, |
|
2003 | 2002 | strip, |
|
2004 | 2003 | prefix, |
|
2005 | 2004 | files, |
|
2006 | 2005 | eolmode=None, |
|
2007 | 2006 | ) |
|
2008 | 2007 | except error.PatchError as e: |
|
2009 | 2008 | raise error.Abort(stringutil.forcebytestr(e)) |
|
2010 | 2009 | if opts.get(b'exact'): |
|
2011 | 2010 | editor = None |
|
2012 | 2011 | else: |
|
2013 | 2012 | editor = getcommiteditor(editform=b'import.bypass') |
|
2014 | 2013 | memctx = context.memctx( |
|
2015 | 2014 | repo, |
|
2016 | 2015 | (p1.node(), p2.node()), |
|
2017 | 2016 | message, |
|
2018 | 2017 | files=files, |
|
2019 | 2018 | filectxfn=store, |
|
2020 | 2019 | user=user, |
|
2021 | 2020 | date=date, |
|
2022 | 2021 | branch=branch, |
|
2023 | 2022 | editor=editor, |
|
2024 | 2023 | ) |
|
2025 | 2024 | |
|
2026 | 2025 | overrides = {} |
|
2027 | 2026 | if opts.get(b'secret'): |
|
2028 | 2027 | overrides[(b'phases', b'new-commit')] = b'secret' |
|
2029 | 2028 | with repo.ui.configoverride(overrides, b'import'): |
|
2030 | 2029 | n = memctx.commit() |
|
2031 | 2030 | finally: |
|
2032 | 2031 | store.close() |
|
2033 | 2032 | if opts.get(b'exact') and nocommit: |
|
2034 | 2033 | # --exact with --no-commit is still useful in that it applies the

2035 | 2034 | # merge and branch bits
|
2036 | 2035 | ui.warn(_(b"warning: can't check exact import with --no-commit\n")) |
|
2037 | 2036 | elif opts.get(b'exact') and (not n or hex(n) != nodeid): |
|
2038 | 2037 | raise error.Abort(_(b'patch is damaged or loses information')) |
|
2039 | 2038 | msg = _(b'applied to working directory') |
|
2040 | 2039 | if n: |
|
2041 | 2040 | # i18n: refers to a short changeset id |
|
2042 | 2041 | msg = _(b'created %s') % short(n) |
|
2043 | 2042 | return msg, n, rejects |
|
2044 | 2043 | |
|
2045 | 2044 | |
|
2046 | 2045 | # facility to let extensions include additional data in an exported patch |
|
2047 | 2046 | # list of identifiers to be executed in order |
|
2048 | 2047 | extraexport = [] |
|
2049 | 2048 | # mapping from identifier to actual export function |
|
2050 | 2049 | # function has to return a string to be added to the header, or None
|
2051 | 2050 | # it is given two arguments (sequencenumber, changectx) |
|
2052 | 2051 | extraexportmap = {} |
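# A minimal sketch (not part of this module) of an extension adding an
# extra header line; the b'phase' identifier and helper name are
# hypothetical:
#
#   def _phaseheader(seqno, ctx):
#       # return bytes to be emitted as '# <header>', or None for nothing
#       return b'Phase %s' % ctx.phasestr()
#
#   extraexport.append(b'phase')
#   extraexportmap[b'phase'] = _phaseheader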
|
2053 | 2052 | |
|
2054 | 2053 | |
|
2055 | 2054 | def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts): |
|
2056 | 2055 | node = scmutil.binnode(ctx) |
|
2057 | 2056 | parents = [p.node() for p in ctx.parents() if p] |
|
2058 | 2057 | branch = ctx.branch() |
|
2059 | 2058 | if switch_parent: |
|
2060 | 2059 | parents.reverse() |
|
2061 | 2060 | |
|
2062 | 2061 | if parents: |
|
2063 | 2062 | prev = parents[0] |
|
2064 | 2063 | else: |
|
2065 | 2064 | prev = nullid |
|
2066 | 2065 | |
|
2067 | 2066 | fm.context(ctx=ctx) |
|
2068 | 2067 | fm.plain(b'# HG changeset patch\n') |
|
2069 | 2068 | fm.write(b'user', b'# User %s\n', ctx.user()) |
|
2070 | 2069 | fm.plain(b'# Date %d %d\n' % ctx.date()) |
|
2071 | 2070 | fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date())) |
|
2072 | 2071 | fm.condwrite( |
|
2073 | 2072 | branch and branch != b'default', b'branch', b'# Branch %s\n', branch |
|
2074 | 2073 | ) |
|
2075 | 2074 | fm.write(b'node', b'# Node ID %s\n', hex(node)) |
|
2076 | 2075 | fm.plain(b'# Parent %s\n' % hex(prev)) |
|
2077 | 2076 | if len(parents) > 1: |
|
2078 | 2077 | fm.plain(b'# Parent %s\n' % hex(parents[1])) |
|
2079 | 2078 | fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node')) |
|
2080 | 2079 | |
|
2081 | 2080 | # TODO: redesign extraexportmap function to support formatter |
|
2082 | 2081 | for headerid in extraexport: |
|
2083 | 2082 | header = extraexportmap[headerid](seqno, ctx) |
|
2084 | 2083 | if header is not None: |
|
2085 | 2084 | fm.plain(b'# %s\n' % header) |
|
2086 | 2085 | |
|
2087 | 2086 | fm.write(b'desc', b'%s\n', ctx.description().rstrip()) |
|
2088 | 2087 | fm.plain(b'\n') |
|
2089 | 2088 | |
|
2090 | 2089 | if fm.isplain(): |
|
2091 | 2090 | chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts) |
|
2092 | 2091 | for chunk, label in chunkiter: |
|
2093 | 2092 | fm.plain(chunk, label=label) |
|
2094 | 2093 | else: |
|
2095 | 2094 | chunkiter = patch.diff(repo, prev, node, match, opts=diffopts) |
|
2096 | 2095 | # TODO: make it structured? |
|
2097 | 2096 | fm.data(diff=b''.join(chunkiter)) |
|
2098 | 2097 | |
|
2099 | 2098 | |
|
2100 | 2099 | def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match): |
|
2101 | 2100 | """Export changesets to stdout or a single file""" |
|
2102 | 2101 | for seqno, rev in enumerate(revs, 1): |
|
2103 | 2102 | ctx = repo[rev] |
|
2104 | 2103 | if not dest.startswith(b'<'): |
|
2105 | 2104 | repo.ui.note(b"%s\n" % dest) |
|
2106 | 2105 | fm.startitem() |
|
2107 | 2106 | _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts) |
|
2108 | 2107 | |
|
2109 | 2108 | |
|
2110 | 2109 | def _exportfntemplate( |
|
2111 | 2110 | repo, revs, basefm, fntemplate, switch_parent, diffopts, match |
|
2112 | 2111 | ): |
|
2113 | 2112 | """Export changesets to possibly multiple files""" |
|
2114 | 2113 | total = len(revs) |
|
2115 | 2114 | revwidth = max(len(str(rev)) for rev in revs) |
|
2116 | 2115 | filemap = util.sortdict() # filename: [(seqno, rev), ...] |
|
2117 | 2116 | |
|
2118 | 2117 | for seqno, rev in enumerate(revs, 1): |
|
2119 | 2118 | ctx = repo[rev] |
|
2120 | 2119 | dest = makefilename( |
|
2121 | 2120 | ctx, fntemplate, total=total, seqno=seqno, revwidth=revwidth |
|
2122 | 2121 | ) |
|
2123 | 2122 | filemap.setdefault(dest, []).append((seqno, rev)) |
|
2124 | 2123 | |
|
2125 | 2124 | for dest in filemap: |
|
2126 | 2125 | with formatter.maybereopen(basefm, dest) as fm: |
|
2127 | 2126 | repo.ui.note(b"%s\n" % dest) |
|
2128 | 2127 | for seqno, rev in filemap[dest]: |
|
2129 | 2128 | fm.startitem() |
|
2130 | 2129 | ctx = repo[rev] |
|
2131 | 2130 | _exportsingle( |
|
2132 | 2131 | repo, ctx, fm, match, switch_parent, seqno, diffopts |
|
2133 | 2132 | ) |
|
2134 | 2133 | |
|
2135 | 2134 | |
|
2136 | 2135 | def _prefetchchangedfiles(repo, revs, match): |
|
2137 | 2136 | allfiles = set() |
|
2138 | 2137 | for rev in revs: |
|
2139 | 2138 | for file in repo[rev].files(): |
|
2140 | 2139 | if not match or match(file): |
|
2141 | 2140 | allfiles.add(file) |
|
2142 | 2141 | match = scmutil.matchfiles(repo, allfiles) |
|
2143 | 2142 | revmatches = [(rev, match) for rev in revs] |
|
2144 | 2143 | scmutil.prefetchfiles(repo, revmatches) |
|
2145 | 2144 | |
|
2146 | 2145 | |
|
2147 | 2146 | def export( |
|
2148 | 2147 | repo, |
|
2149 | 2148 | revs, |
|
2150 | 2149 | basefm, |
|
2151 | 2150 | fntemplate=b'hg-%h.patch', |
|
2152 | 2151 | switch_parent=False, |
|
2153 | 2152 | opts=None, |
|
2154 | 2153 | match=None, |
|
2155 | 2154 | ): |
|
2156 | 2155 | '''export changesets as hg patches |
|
2157 | 2156 | |
|
2158 | 2157 | Args: |
|
2159 | 2158 | repo: The repository from which we're exporting revisions. |
|
2160 | 2159 | revs: A list of revisions to export as revision numbers. |
|
2161 | 2160 | basefm: A formatter to which patches should be written. |
|
2162 | 2161 | fntemplate: An optional string to use for generating patch file names. |
|
2163 | 2162 | switch_parent: If True, show diffs against the second parent when it is

2164 | 2163 | not nullid. Default is False, which always shows the diff against p1.
|
2165 | 2164 | opts: diff options to use for generating the patch. |
|
2166 | 2165 | match: If specified, only export changes to files matching this matcher. |
|
2167 | 2166 | |
|
2168 | 2167 | Returns: |
|
2169 | 2168 | Nothing. |
|
2170 | 2169 | |
|
2171 | 2170 | Side Effect: |
|
2172 | 2171 | "HG Changeset Patch" data is emitted to one of the following |
|
2173 | 2172 | destinations: |
|
2174 | 2173 | fntemplate specified: Each rev is written to a unique file named using |
|
2175 | 2174 | the given template. |
|
2176 | 2175 | Otherwise: All revs will be written to basefm. |
|
2177 | 2176 | ''' |
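# A minimal usage sketch, assuming a repo and a formatter obtained from
# ui.formatter(); with the default fntemplate each revision is written to
# its own hg-<short hash>.patch file:
#
#   with repo.ui.formatter(b'export', {}) as basefm:
#       export(repo, [repo[b'tip'].rev()], basefm)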
|
2178 | 2177 | _prefetchchangedfiles(repo, revs, match) |
|
2179 | 2178 | |
|
2180 | 2179 | if not fntemplate: |
|
2181 | 2180 | _exportfile( |
|
2182 | 2181 | repo, revs, basefm, b'<unnamed>', switch_parent, opts, match |
|
2183 | 2182 | ) |
|
2184 | 2183 | else: |
|
2185 | 2184 | _exportfntemplate( |
|
2186 | 2185 | repo, revs, basefm, fntemplate, switch_parent, opts, match |
|
2187 | 2186 | ) |
|
2188 | 2187 | |
|
2189 | 2188 | |
|
2190 | 2189 | def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None): |
|
2191 | 2190 | """Export changesets to the given file stream""" |
|
2192 | 2191 | _prefetchchangedfiles(repo, revs, match) |
|
2193 | 2192 | |
|
2194 | 2193 | dest = getattr(fp, 'name', b'<unnamed>') |
|
2195 | 2194 | with formatter.formatter(repo.ui, fp, b'export', {}) as fm: |
|
2196 | 2195 | _exportfile(repo, revs, fm, dest, switch_parent, opts, match) |
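# A minimal usage sketch: exportfile() accepts any binary file-like
# object, e.g. (assuming a repo):
#
#   import io
#   buf = io.BytesIO()
#   exportfile(repo, [repo[b'tip'].rev()], buf)
#   patchtext = buf.getvalue()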
|
2197 | 2196 | |
|
2198 | 2197 | |
|
2199 | 2198 | def showmarker(fm, marker, index=None): |
|
2200 | 2199 | """utility function to display obsolescence marker in a readable way |
|
2201 | 2200 | |
|
2202 | 2201 | To be used by debug functions."""
|
2203 | 2202 | if index is not None: |
|
2204 | 2203 | fm.write(b'index', b'%i ', index) |
|
2205 | 2204 | fm.write(b'prednode', b'%s ', hex(marker.prednode())) |
|
2206 | 2205 | succs = marker.succnodes() |
|
2207 | 2206 | fm.condwrite( |
|
2208 | 2207 | succs, |
|
2209 | 2208 | b'succnodes', |
|
2210 | 2209 | b'%s ', |
|
2211 | 2210 | fm.formatlist(map(hex, succs), name=b'node'), |
|
2212 | 2211 | ) |
|
2213 | 2212 | fm.write(b'flag', b'%X ', marker.flags()) |
|
2214 | 2213 | parents = marker.parentnodes() |
|
2215 | 2214 | if parents is not None: |
|
2216 | 2215 | fm.write( |
|
2217 | 2216 | b'parentnodes', |
|
2218 | 2217 | b'{%s} ', |
|
2219 | 2218 | fm.formatlist(map(hex, parents), name=b'node', sep=b', '), |
|
2220 | 2219 | ) |
|
2221 | 2220 | fm.write(b'date', b'(%s) ', fm.formatdate(marker.date())) |
|
2222 | 2221 | meta = marker.metadata().copy() |
|
2223 | 2222 | meta.pop(b'date', None) |
|
2224 | 2223 | smeta = pycompat.rapply(pycompat.maybebytestr, meta) |
|
2225 | 2224 | fm.write( |
|
2226 | 2225 | b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ') |
|
2227 | 2226 | ) |
|
2228 | 2227 | fm.plain(b'\n') |
|
2229 | 2228 | |
|
2230 | 2229 | |
|
2231 | 2230 | def finddate(ui, repo, date): |
|
2232 | 2231 | """Find the tipmost changeset that matches the given date spec""" |
|
2233 | 2232 | mrevs = repo.revs(b'date(%s)', date) |
|
2234 | 2233 | try: |
|
2235 | 2234 | rev = mrevs.max() |
|
2236 | 2235 | except ValueError: |
|
2237 | 2236 | raise error.Abort(_(b"revision matching date not found")) |
|
2238 | 2237 | |
|
2239 | 2238 | ui.status( |
|
2240 | 2239 | _(b"found revision %d from %s\n") |
|
2241 | 2240 | % (rev, dateutil.datestr(repo[rev].date())) |
|
2242 | 2241 | ) |
|
2243 | 2242 | return b'%d' % rev |
|
2244 | 2243 | |
|
2245 | 2244 | |
|
2246 | 2245 | def increasingwindows(windowsize=8, sizelimit=512): |
|
2247 | 2246 | while True: |
|
2248 | 2247 | yield windowsize |
|
2249 | 2248 | if windowsize < sizelimit: |
|
2250 | 2249 | windowsize *= 2 |
|
2251 | 2250 | |
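# With the defaults this yields 8, 16, 32, ... capped at 512, which then
# repeats forever; for example:
#
#   import itertools
#   sizes = list(itertools.islice(increasingwindows(), 8))
#   # sizes == [8, 16, 32, 64, 128, 256, 512, 512]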
|
2252 | 2251 | |
|
2253 | 2252 | def _walkrevs(repo, opts): |
|
2254 | 2253 | # Default --rev value depends on --follow but --follow behavior |
|
2255 | 2254 | # depends on revisions resolved from --rev... |
|
2256 | 2255 | follow = opts.get(b'follow') or opts.get(b'follow_first') |
|
2257 | 2256 | revspec = opts.get(b'rev') |
|
2258 | 2257 | if follow and revspec: |
|
2259 | 2258 | revs = scmutil.revrange(repo, revspec) |
|
2260 | 2259 | revs = repo.revs(b'reverse(::%ld)', revs) |
|
2261 | 2260 | elif revspec: |
|
2262 | 2261 | revs = scmutil.revrange(repo, revspec) |
|
2263 | 2262 | elif follow and repo.dirstate.p1() == nullid: |
|
2264 | 2263 | revs = smartset.baseset() |
|
2265 | 2264 | elif follow: |
|
2266 | 2265 | revs = repo.revs(b'reverse(:.)') |
|
2267 | 2266 | else: |
|
2268 | 2267 | revs = smartset.spanset(repo) |
|
2269 | 2268 | revs.reverse() |
|
2270 | 2269 | return revs |
|
2271 | 2270 | |
|
2272 | 2271 | |
|
2273 | 2272 | class FileWalkError(Exception): |
|
2274 | 2273 | pass |
|
2275 | 2274 | |
|
2276 | 2275 | |
|
2277 | 2276 | def walkfilerevs(repo, match, follow, revs, fncache): |
|
2278 | 2277 | '''Walks the file history for the matched files. |
|
2279 | 2278 | |
|
2280 | 2279 | Returns the changeset revs that are involved in the file history. |
|
2281 | 2280 | |
|
2282 | 2281 | Throws FileWalkError if the file history can't be walked using |
|
2283 | 2282 | filelogs alone. |
|
2284 | 2283 | ''' |
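# A minimal usage sketch, assuming a non-empty repo and a files-only
# matcher (patterns force callers onto the slow path); 'fncache' is
# filled as a side effect, mapping rev -> matched filenames:
#
#   fncache = {}
#   revs = repo.revs(b'all()')
#   wanted = walkfilerevs(repo, match, False, revs, fncache)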
|
2285 | 2284 | wanted = set() |
|
2286 | 2285 | copies = [] |
|
2287 | 2286 | minrev, maxrev = min(revs), max(revs) |
|
2288 | 2287 | |
|
2289 | 2288 | def filerevs(filelog, last): |
|
2290 | 2289 | """ |
|
2291 | 2290 | Only files, no patterns. Check the history of each file. |
|
2292 | 2291 | |
|
2293 | 2292 | Examines filelog entries within the minrev..maxrev linkrev range.

2294 | 2293 | Returns an iterator yielding (linkrev, parentlinkrevs, copied)

2295 | 2294 | tuples in backwards order.
|
2296 | 2295 | """ |
|
2297 | 2296 | cl_count = len(repo) |
|
2298 | 2297 | revs = [] |
|
2299 | 2298 | for j in pycompat.xrange(0, last + 1): |
|
2300 | 2299 | linkrev = filelog.linkrev(j) |
|
2301 | 2300 | if linkrev < minrev: |
|
2302 | 2301 | continue |
|
2303 | 2302 | # only yield revs for which we have the changelog; it can

2304 | 2303 | # happen while doing "hg log" during a pull or commit
|
2305 | 2304 | if linkrev >= cl_count: |
|
2306 | 2305 | break |
|
2307 | 2306 | |
|
2308 | 2307 | parentlinkrevs = [] |
|
2309 | 2308 | for p in filelog.parentrevs(j): |
|
2310 | 2309 | if p != nullrev: |
|
2311 | 2310 | parentlinkrevs.append(filelog.linkrev(p)) |
|
2312 | 2311 | n = filelog.node(j) |
|
2313 | 2312 | revs.append( |
|
2314 | 2313 | (linkrev, parentlinkrevs, follow and filelog.renamed(n)) |
|
2315 | 2314 | ) |
|
2316 | 2315 | |
|
2317 | 2316 | return reversed(revs) |
|
2318 | 2317 | |
|
2319 | 2318 | def iterfiles(): |
|
2320 | 2319 | pctx = repo[b'.'] |
|
2321 | 2320 | for filename in match.files(): |
|
2322 | 2321 | if follow: |
|
2323 | 2322 | if filename not in pctx: |
|
2324 | 2323 | raise error.Abort( |
|
2325 | 2324 | _( |
|
2326 | 2325 | b'cannot follow file not in parent ' |
|
2327 | 2326 | b'revision: "%s"' |
|
2328 | 2327 | ) |
|
2329 | 2328 | % filename |
|
2330 | 2329 | ) |
|
2331 | 2330 | yield filename, pctx[filename].filenode() |
|
2332 | 2331 | else: |
|
2333 | 2332 | yield filename, None |
|
2334 | 2333 | for filename_node in copies: |
|
2335 | 2334 | yield filename_node |
|
2336 | 2335 | |
|
2337 | 2336 | for file_, node in iterfiles(): |
|
2338 | 2337 | filelog = repo.file(file_) |
|
2339 | 2338 | if not len(filelog): |
|
2340 | 2339 | if node is None: |
|
2341 | 2340 | # A zero count may be a directory or deleted file, so |
|
2342 | 2341 | # try to find matching entries on the slow path. |
|
2343 | 2342 | if follow: |
|
2344 | 2343 | raise error.Abort( |
|
2345 | 2344 | _(b'cannot follow nonexistent file: "%s"') % file_ |
|
2346 | 2345 | ) |
|
2347 | 2346 | raise FileWalkError(b"Cannot walk via filelog") |
|
2348 | 2347 | else: |
|
2349 | 2348 | continue |
|
2350 | 2349 | |
|
2351 | 2350 | if node is None: |
|
2352 | 2351 | last = len(filelog) - 1 |
|
2353 | 2352 | else: |
|
2354 | 2353 | last = filelog.rev(node) |
|
2355 | 2354 | |
|
2356 | 2355 | # keep track of all ancestors of the file |
|
2357 | 2356 | ancestors = {filelog.linkrev(last)} |
|
2358 | 2357 | |
|
2359 | 2358 | # iterate from latest to oldest revision |
|
2360 | 2359 | for rev, flparentlinkrevs, copied in filerevs(filelog, last): |
|
2361 | 2360 | if not follow: |
|
2362 | 2361 | if rev > maxrev: |
|
2363 | 2362 | continue |
|
2364 | 2363 | else: |
|
2365 | 2364 | # Note that last might not be the first interesting |
|
2366 | 2365 | # rev to us: |
|
2367 | 2366 | # if the file has been changed after maxrev, we'll |
|
2368 | 2367 | # have linkrev(last) > maxrev, and we still need |
|
2369 | 2368 | # to explore the file graph |
|
2370 | 2369 | if rev not in ancestors: |
|
2371 | 2370 | continue |
|
2372 | 2371 | # XXX insert 1327 fix here |
|
2373 | 2372 | if flparentlinkrevs: |
|
2374 | 2373 | ancestors.update(flparentlinkrevs) |
|
2375 | 2374 | |
|
2376 | 2375 | fncache.setdefault(rev, []).append(file_) |
|
2377 | 2376 | wanted.add(rev) |
|
2378 | 2377 | if copied: |
|
2379 | 2378 | copies.append(copied) |
|
2380 | 2379 | |
|
2381 | 2380 | return wanted |
|
2382 | 2381 | |
|
2383 | 2382 | |
|
2384 | 2383 | class _followfilter(object): |
|
2385 | 2384 | def __init__(self, repo, onlyfirst=False): |
|
2386 | 2385 | self.repo = repo |
|
2387 | 2386 | self.startrev = nullrev |
|
2388 | 2387 | self.roots = set() |
|
2389 | 2388 | self.onlyfirst = onlyfirst |
|
2390 | 2389 | |
|
2391 | 2390 | def match(self, rev): |
|
2392 | 2391 | def realparents(rev): |
|
2393 | 2392 | try: |
|
2394 | 2393 | if self.onlyfirst: |
|
2395 | 2394 | return self.repo.changelog.parentrevs(rev)[0:1] |
|
2396 | 2395 | else: |
|
2397 | 2396 | return filter( |
|
2398 | 2397 | lambda x: x != nullrev, |
|
2399 | 2398 | self.repo.changelog.parentrevs(rev), |
|
2400 | 2399 | ) |
|
2401 | 2400 | except error.WdirUnsupported: |
|
2402 | 2401 | prevs = [p.rev() for p in self.repo[rev].parents()] |
|
2403 | 2402 | if self.onlyfirst: |
|
2404 | 2403 | return prevs[:1] |
|
2405 | 2404 | else: |
|
2406 | 2405 | return prevs |
|
2407 | 2406 | |
|
2408 | 2407 | if self.startrev == nullrev: |
|
2409 | 2408 | self.startrev = rev |
|
2410 | 2409 | return True |
|
2411 | 2410 | |
|
2412 | 2411 | if rev > self.startrev: |
|
2413 | 2412 | # forward: all descendants |
|
2414 | 2413 | if not self.roots: |
|
2415 | 2414 | self.roots.add(self.startrev) |
|
2416 | 2415 | for parent in realparents(rev): |
|
2417 | 2416 | if parent in self.roots: |
|
2418 | 2417 | self.roots.add(rev) |
|
2419 | 2418 | return True |
|
2420 | 2419 | else: |
|
2421 | 2420 | # backwards: all parents |
|
2422 | 2421 | if not self.roots: |
|
2423 | 2422 | self.roots.update(realparents(self.startrev)) |
|
2424 | 2423 | if rev in self.roots: |
|
2425 | 2424 | self.roots.remove(rev) |
|
2426 | 2425 | self.roots.update(realparents(rev)) |
|
2427 | 2426 | return True |
|
2428 | 2427 | |
|
2429 | 2428 | return False |
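# A rough sketch of the filter's behavior: the first rev fed to match()
# seeds the filter, after which it accepts descendants when scanning
# forwards and ancestors when scanning backwards (assuming a repo):
#
#   ff = _followfilter(repo)
#   start = repo[b'.'].rev()
#   ancestors = [r for r in range(start, -1, -1) if ff.match(r)]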
|
2430 | 2429 | |
|
2431 | 2430 | |
|
2432 | 2431 | def walkchangerevs(repo, match, opts, prepare): |
|
2433 | 2432 | '''Iterate over files and the revs in which they changed. |
|
2434 | 2433 | |
|
2435 | 2434 | Callers most commonly need to iterate backwards over the history |
|
2436 | 2435 | in which they are interested. Doing so has awful (quadratic-looking) |
|
2437 | 2436 | performance, so we use iterators in a "windowed" way. |
|
2438 | 2437 | |
|
2439 | 2438 | We walk a window of revisions in the desired order. Within the |
|
2440 | 2439 | window, we first walk forwards to gather data, then in the desired |
|
2441 | 2440 | order (usually backwards) to display it. |
|
2442 | 2441 | |
|
2443 | 2442 | This function returns an iterator yielding contexts. Before |
|
2444 | 2443 | yielding each context, the iterator will first call the prepare |
|
2445 | 2444 | function on each context in the window in forward order.''' |
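# A minimal usage sketch; 'prepare' is called once per context, in forward
# order within each window, before the contexts are yielded (assuming a
# repo and a match object such as scmutil.match(repo[None], [])):
#
#   def prepare(ctx, fns):
#       pass  # 'fns' iterates the matched files changed in ctx
#
#   for ctx in walkchangerevs(repo, match, {b'rev': [b'tip:0']}, prepare):
#       repo.ui.write(b'%d\n' % ctx.rev())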
|
2446 | 2445 | |
|
2447 | 2446 | allfiles = opts.get(b'all_files') |
|
2448 | 2447 | follow = opts.get(b'follow') or opts.get(b'follow_first') |
|
2449 | 2448 | revs = _walkrevs(repo, opts) |
|
2450 | 2449 | if not revs: |
|
2451 | 2450 | return [] |
|
2452 | 2451 | wanted = set() |
|
2453 | 2452 | slowpath = match.anypats() or (not match.always() and opts.get(b'removed')) |
|
2454 | 2453 | fncache = {} |
|
2455 | 2454 | change = repo.__getitem__ |
|
2456 | 2455 | |
|
2457 | 2456 | # First step is to fill wanted, the set of revisions that we want to yield. |
|
2458 | 2457 | # When it does not induce extra cost, we also fill fncache for revisions in |
|
2459 | 2458 | # wanted: a cache of filenames that were changed (ctx.files()) and that |
|
2460 | 2459 | # match the file filtering conditions. |
|
2461 | 2460 | |
|
2462 | 2461 | if match.always() or allfiles: |
|
2463 | 2462 | # No files, no patterns. Display all revs. |
|
2464 | 2463 | wanted = revs |
|
2465 | 2464 | elif not slowpath: |
|
2466 | 2465 | # We only have to read through the filelog to find wanted revisions |
|
2467 | 2466 | |
|
2468 | 2467 | try: |
|
2469 | 2468 | wanted = walkfilerevs(repo, match, follow, revs, fncache) |
|
2470 | 2469 | except FileWalkError: |
|
2471 | 2470 | slowpath = True |
|
2472 | 2471 | |
|
2473 | 2472 | # We decided to fall back to the slowpath because at least one |
|
2474 | 2473 | # of the paths was not a file. Check to see if at least one of them |
|
2475 | 2474 | # existed in history, otherwise simply return |
|
2476 | 2475 | for path in match.files(): |
|
2477 | 2476 | if path == b'.' or path in repo.store: |
|
2478 | 2477 | break |
|
2479 | 2478 | else: |
|
2480 | 2479 | return [] |
|
2481 | 2480 | |
|
2482 | 2481 | if slowpath: |
|
2483 | 2482 | # We have to read the changelog to match filenames against |
|
2484 | 2483 | # changed files |
|
2485 | 2484 | |
|
2486 | 2485 | if follow: |
|
2487 | 2486 | raise error.Abort( |
|
2488 | 2487 | _(b'can only follow copies/renames for explicit filenames') |
|
2489 | 2488 | ) |
|
2490 | 2489 | |
|
2491 | 2490 | # The slow path checks files modified in every changeset. |
|
2492 | 2491 | # This is really slow on large repos, so compute the set lazily. |
|
2493 | 2492 | class lazywantedset(object): |
|
2494 | 2493 | def __init__(self): |
|
2495 | 2494 | self.set = set() |
|
2496 | 2495 | self.revs = set(revs) |
|
2497 | 2496 | |
|
2498 | 2497 | # No need to worry about locality here because it will be accessed |
|
2499 | 2498 | # in the same order as the increasing window below. |
|
2500 | 2499 | def __contains__(self, value): |
|
2501 | 2500 | if value in self.set: |
|
2502 | 2501 | return True |
|
2503 | 2502 | elif value not in self.revs:
|
2504 | 2503 | return False |
|
2505 | 2504 | else: |
|
2506 | 2505 | self.revs.discard(value) |
|
2507 | 2506 | ctx = change(value) |
|
2508 | 2507 | if allfiles: |
|
2509 | 2508 | matches = list(ctx.manifest().walk(match)) |
|
2510 | 2509 | else: |
|
2511 | 2510 | matches = [f for f in ctx.files() if match(f)] |
|
2512 | 2511 | if matches: |
|
2513 | 2512 | fncache[value] = matches |
|
2514 | 2513 | self.set.add(value) |
|
2515 | 2514 | return True |
|
2516 | 2515 | return False |
|
2517 | 2516 | |
|
2518 | 2517 | def discard(self, value): |
|
2519 | 2518 | self.revs.discard(value) |
|
2520 | 2519 | self.set.discard(value) |
|
2521 | 2520 | |
|
2522 | 2521 | wanted = lazywantedset() |
|
2523 | 2522 | |
|
2524 | 2523 | # it might be worthwhile to do this in the iterator if the rev range |
|
2525 | 2524 | # is descending and the prune args are all within that range |
|
2526 | 2525 | for rev in opts.get(b'prune', ()): |
|
2527 | 2526 | rev = repo[rev].rev() |
|
2528 | 2527 | ff = _followfilter(repo) |
|
2529 | 2528 | stop = min(revs[0], revs[-1]) |
|
2530 | 2529 | for x in pycompat.xrange(rev, stop - 1, -1): |
|
2531 | 2530 | if ff.match(x): |
|
2532 | 2531 | wanted = wanted - [x] |
|
2533 | 2532 | |
|
2534 | 2533 | # Now that wanted is correctly initialized, we can iterate over the |
|
2535 | 2534 | # revision range, yielding only revisions in wanted. |
|
2536 | 2535 | def iterate(): |
|
2537 | 2536 | if follow and match.always(): |
|
2538 | 2537 | ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first')) |
|
2539 | 2538 | |
|
2540 | 2539 | def want(rev): |
|
2541 | 2540 | return ff.match(rev) and rev in wanted |
|
2542 | 2541 | |
|
2543 | 2542 | else: |
|
2544 | 2543 | |
|
2545 | 2544 | def want(rev): |
|
2546 | 2545 | return rev in wanted |
|
2547 | 2546 | |
|
2548 | 2547 | it = iter(revs) |
|
2549 | 2548 | stopiteration = False |
|
2550 | 2549 | for windowsize in increasingwindows(): |
|
2551 | 2550 | nrevs = [] |
|
2552 | 2551 | for i in pycompat.xrange(windowsize): |
|
2553 | 2552 | rev = next(it, None) |
|
2554 | 2553 | if rev is None: |
|
2555 | 2554 | stopiteration = True |
|
2556 | 2555 | break |
|
2557 | 2556 | elif want(rev): |
|
2558 | 2557 | nrevs.append(rev) |
|
2559 | 2558 | for rev in sorted(nrevs): |
|
2560 | 2559 | fns = fncache.get(rev) |
|
2561 | 2560 | ctx = change(rev) |
|
2562 | 2561 | if not fns: |
|
2563 | 2562 | |
|
2564 | 2563 | def fns_generator(): |
|
2565 | 2564 | if allfiles: |
|
2566 | 2565 | |
|
2567 | 2566 | def bad(f, msg): |
|
2568 | 2567 | pass |
|
2569 | 2568 | |
|
2570 | 2569 | for f in ctx.matches(matchmod.badmatch(match, bad)): |
|
2571 | 2570 | yield f |
|
2572 | 2571 | else: |
|
2573 | 2572 | for f in ctx.files(): |
|
2574 | 2573 | if match(f): |
|
2575 | 2574 | yield f |
|
2576 | 2575 | |
|
2577 | 2576 | fns = fns_generator() |
|
2578 | 2577 | prepare(ctx, fns) |
|
2579 | 2578 | for rev in nrevs: |
|
2580 | 2579 | yield change(rev) |
|
2581 | 2580 | |
|
2582 | 2581 | if stopiteration: |
|
2583 | 2582 | break |
|
2584 | 2583 | |
|
2585 | 2584 | return iterate() |
|
2586 | 2585 | |
|
2587 | 2586 | |
|
2588 | 2587 | def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts): |
|
2589 | 2588 | bad = [] |
|
2590 | 2589 | |
|
2591 | 2590 | badfn = lambda x, y: bad.append(x) or match.bad(x, y) |
|
2592 | 2591 | names = [] |
|
2593 | 2592 | wctx = repo[None] |
|
2594 | 2593 | cca = None |
|
2595 | 2594 | abort, warn = scmutil.checkportabilityalert(ui) |
|
2596 | 2595 | if abort or warn: |
|
2597 | 2596 | cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate) |
|
2598 | 2597 | |
|
2599 | 2598 | match = repo.narrowmatch(match, includeexact=True) |
|
2600 | 2599 | badmatch = matchmod.badmatch(match, badfn) |
|
2601 | 2600 | dirstate = repo.dirstate |
|
2602 | 2601 | # We don't want to just call wctx.walk here, since it would return a lot of

2603 | 2602 | # clean files, which we aren't interested in, and doing so takes time.
|
2604 | 2603 | for f in sorted( |
|
2605 | 2604 | dirstate.walk( |
|
2606 | 2605 | badmatch, |
|
2607 | 2606 | subrepos=sorted(wctx.substate), |
|
2608 | 2607 | unknown=True, |
|
2609 | 2608 | ignored=False, |
|
2610 | 2609 | full=False, |
|
2611 | 2610 | ) |
|
2612 | 2611 | ): |
|
2613 | 2612 | exact = match.exact(f) |
|
2614 | 2613 | if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f): |
|
2615 | 2614 | if cca: |
|
2616 | 2615 | cca(f) |
|
2617 | 2616 | names.append(f) |
|
2618 | 2617 | if ui.verbose or not exact: |
|
2619 | 2618 | ui.status( |
|
2620 | 2619 | _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added' |
|
2621 | 2620 | ) |
|
2622 | 2621 | |
|
2623 | 2622 | for subpath in sorted(wctx.substate): |
|
2624 | 2623 | sub = wctx.sub(subpath) |
|
2625 | 2624 | try: |
|
2626 | 2625 | submatch = matchmod.subdirmatcher(subpath, match) |
|
2627 | 2626 | subprefix = repo.wvfs.reljoin(prefix, subpath) |
|
2628 | 2627 | subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn) |
|
2629 | 2628 | if opts.get('subrepos'): |
|
2630 | 2629 | bad.extend( |
|
2631 | 2630 | sub.add(ui, submatch, subprefix, subuipathfn, False, **opts) |
|
2632 | 2631 | ) |
|
2633 | 2632 | else: |
|
2634 | 2633 | bad.extend( |
|
2635 | 2634 | sub.add(ui, submatch, subprefix, subuipathfn, True, **opts) |
|
2636 | 2635 | ) |
|
2637 | 2636 | except error.LookupError: |
|
2638 | 2637 | ui.status( |
|
2639 | 2638 | _(b"skipping missing subrepository: %s\n") % uipathfn(subpath) |
|
2640 | 2639 | ) |
|
2641 | 2640 | |
|
2642 | 2641 | if not opts.get('dry_run'): |
|
2643 | 2642 | rejected = wctx.add(names, prefix) |
|
2644 | 2643 | bad.extend(f for f in rejected if f in match.files()) |
|
2645 | 2644 | return bad |
|
2646 | 2645 | |
|
2647 | 2646 | |
|
2648 | 2647 | def addwebdirpath(repo, serverpath, webconf): |
|
2649 | 2648 | webconf[serverpath] = repo.root |
|
2650 | 2649 | repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root)) |
|
2651 | 2650 | |
|
2652 | 2651 | for r in repo.revs(b'filelog("path:.hgsub")'): |
|
2653 | 2652 | ctx = repo[r] |
|
2654 | 2653 | for subpath in ctx.substate: |
|
2655 | 2654 | ctx.sub(subpath).addwebdirpath(serverpath, webconf) |
|
2656 | 2655 | |
|
2657 | 2656 | |
|
2658 | 2657 | def forget( |
|
2659 | 2658 | ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive |
|
2660 | 2659 | ): |
|
2661 | 2660 | if dryrun and interactive: |
|
2662 | 2661 | raise error.Abort(_(b"cannot specify both --dry-run and --interactive")) |
|
2663 | 2662 | bad = [] |
|
2664 | 2663 | badfn = lambda x, y: bad.append(x) or match.bad(x, y) |
|
2665 | 2664 | wctx = repo[None] |
|
2666 | 2665 | forgot = [] |
|
2667 | 2666 | |
|
2668 | 2667 | s = repo.status(match=matchmod.badmatch(match, badfn), clean=True) |
|
2669 | 2668 | forget = sorted(s.modified + s.added + s.deleted + s.clean) |
|
2670 | 2669 | if explicitonly: |
|
2671 | 2670 | forget = [f for f in forget if match.exact(f)] |
|
2672 | 2671 | |
|
2673 | 2672 | for subpath in sorted(wctx.substate): |
|
2674 | 2673 | sub = wctx.sub(subpath) |
|
2675 | 2674 | submatch = matchmod.subdirmatcher(subpath, match) |
|
2676 | 2675 | subprefix = repo.wvfs.reljoin(prefix, subpath) |
|
2677 | 2676 | subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn) |
|
2678 | 2677 | try: |
|
2679 | 2678 | subbad, subforgot = sub.forget( |
|
2680 | 2679 | submatch, |
|
2681 | 2680 | subprefix, |
|
2682 | 2681 | subuipathfn, |
|
2683 | 2682 | dryrun=dryrun, |
|
2684 | 2683 | interactive=interactive, |
|
2685 | 2684 | ) |
|
2686 | 2685 | bad.extend([subpath + b'/' + f for f in subbad]) |
|
2687 | 2686 | forgot.extend([subpath + b'/' + f for f in subforgot]) |
|
2688 | 2687 | except error.LookupError: |
|
2689 | 2688 | ui.status( |
|
2690 | 2689 | _(b"skipping missing subrepository: %s\n") % uipathfn(subpath) |
|
2691 | 2690 | ) |
|
2692 | 2691 | |
|
2693 | 2692 | if not explicitonly: |
|
2694 | 2693 | for f in match.files(): |
|
2695 | 2694 | if f not in repo.dirstate and not repo.wvfs.isdir(f): |
|
2696 | 2695 | if f not in forgot: |
|
2697 | 2696 | if repo.wvfs.exists(f): |
|
2698 | 2697 | # Don't complain if the exact case match wasn't given. |
|
2699 | 2698 | # But don't do this until after checking 'forgot', so |
|
2700 | 2699 | # that subrepo files aren't normalized, and this op is |
|
2701 | 2700 | # purely from data cached by the status walk above. |
|
2702 | 2701 | if repo.dirstate.normalize(f) in repo.dirstate: |
|
2703 | 2702 | continue |
|
2704 | 2703 | ui.warn( |
|
2705 | 2704 | _( |
|
2706 | 2705 | b'not removing %s: ' |
|
2707 | 2706 | b'file is already untracked\n' |
|
2708 | 2707 | ) |
|
2709 | 2708 | % uipathfn(f) |
|
2710 | 2709 | ) |
|
2711 | 2710 | bad.append(f) |
|
2712 | 2711 | |
|
2713 | 2712 | if interactive: |
|
2714 | 2713 | responses = _( |
|
2715 | 2714 | b'[Ynsa?]' |
|
2716 | 2715 | b'$$ &Yes, forget this file' |
|
2717 | 2716 | b'$$ &No, skip this file' |
|
2718 | 2717 | b'$$ &Skip remaining files' |
|
2719 | 2718 | b'$$ Include &all remaining files' |
|
2720 | 2719 | b'$$ &? (display help)' |
|
2721 | 2720 | ) |
|
2722 | 2721 | for filename in forget[:]: |
|
2723 | 2722 | r = ui.promptchoice( |
|
2724 | 2723 | _(b'forget %s %s') % (uipathfn(filename), responses) |
|
2725 | 2724 | ) |
|
2726 | 2725 | if r == 4: # ? |
|
2727 | 2726 | while r == 4: |
|
2728 | 2727 | for c, t in ui.extractchoices(responses)[1]: |
|
2729 | 2728 | ui.write(b'%s - %s\n' % (c, encoding.lower(t))) |
|
2730 | 2729 | r = ui.promptchoice( |
|
2731 | 2730 | _(b'forget %s %s') % (uipathfn(filename), responses) |
|
2732 | 2731 | ) |
|
2733 | 2732 | if r == 0: # yes |
|
2734 | 2733 | continue |
|
2735 | 2734 | elif r == 1: # no |
|
2736 | 2735 | forget.remove(filename) |
|
2737 | 2736 | elif r == 2: # Skip |
|
2738 | 2737 | fnindex = forget.index(filename) |
|
2739 | 2738 | del forget[fnindex:] |
|
2740 | 2739 | break |
|
2741 | 2740 | elif r == 3: # All |
|
2742 | 2741 | break |
|
2743 | 2742 | |
|
2744 | 2743 | for f in forget: |
|
2745 | 2744 | if ui.verbose or not match.exact(f) or interactive: |
|
2746 | 2745 | ui.status( |
|
2747 | 2746 | _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed' |
|
2748 | 2747 | ) |
|
2749 | 2748 | |
|
2750 | 2749 | if not dryrun: |
|
2751 | 2750 | rejected = wctx.forget(forget, prefix) |
|
2752 | 2751 | bad.extend(f for f in rejected if f in match.files()) |
|
2753 | 2752 | forgot.extend(f for f in forget if f not in rejected) |
|
2754 | 2753 | return bad, forgot |
|
2755 | 2754 | |
|
2756 | 2755 | |
|
2757 | 2756 | def files(ui, ctx, m, uipathfn, fm, fmt, subrepos): |
|
2758 | 2757 | ret = 1 |
|
2759 | 2758 | |
|
2760 | 2759 | needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint() |
|
2761 | 2760 | if fm.isplain() and not needsfctx: |
|
2762 | 2761 | # Fast path. The speed-up comes from skipping the formatter, and batching |
|
2763 | 2762 | # calls to ui.write. |
|
2764 | 2763 | buf = [] |
|
2765 | 2764 | for f in ctx.matches(m): |
|
2766 | 2765 | buf.append(fmt % uipathfn(f)) |
|
2767 | 2766 | if len(buf) > 100: |
|
2768 | 2767 | ui.write(b''.join(buf)) |
|
2769 | 2768 | del buf[:] |
|
2770 | 2769 | ret = 0 |
|
2771 | 2770 | if buf: |
|
2772 | 2771 | ui.write(b''.join(buf)) |
|
2773 | 2772 | else: |
|
2774 | 2773 | for f in ctx.matches(m): |
|
2775 | 2774 | fm.startitem() |
|
2776 | 2775 | fm.context(ctx=ctx) |
|
2777 | 2776 | if needsfctx: |
|
2778 | 2777 | fc = ctx[f] |
|
2779 | 2778 | fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags()) |
|
2780 | 2779 | fm.data(path=f) |
|
2781 | 2780 | fm.plain(fmt % uipathfn(f)) |
|
2782 | 2781 | ret = 0 |
|
2783 | 2782 | |
|
2784 | 2783 | for subpath in sorted(ctx.substate): |
|
2785 | 2784 | submatch = matchmod.subdirmatcher(subpath, m) |
|
2786 | 2785 | subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn) |
|
2787 | 2786 | if subrepos or m.exact(subpath) or any(submatch.files()): |
|
2788 | 2787 | sub = ctx.sub(subpath) |
|
2789 | 2788 | try: |
|
2790 | 2789 | recurse = m.exact(subpath) or subrepos |
|
2791 | 2790 | if ( |
|
2792 | 2791 | sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse) |
|
2793 | 2792 | == 0 |
|
2794 | 2793 | ): |
|
2795 | 2794 | ret = 0 |
|
2796 | 2795 | except error.LookupError: |
|
2797 | 2796 | ui.status( |
|
2798 | 2797 | _(b"skipping missing subrepository: %s\n") |
|
2799 | 2798 | % uipathfn(subpath) |
|
2800 | 2799 | ) |
|
2801 | 2800 | |
|
2802 | 2801 | return ret |
|
2803 | 2802 | |
|
2804 | 2803 | |
|
2805 | 2804 | def remove( |
|
2806 | 2805 | ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None |
|
2807 | 2806 | ): |
|
2808 | 2807 | ret = 0 |
|
2809 | 2808 | s = repo.status(match=m, clean=True) |
|
2810 | 2809 | modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean |
|
2811 | 2810 | |
|
2812 | 2811 | wctx = repo[None] |
|
2813 | 2812 | |
|
2814 | 2813 | if warnings is None: |
|
2815 | 2814 | warnings = [] |
|
2816 | 2815 | warn = True |
|
2817 | 2816 | else: |
|
2818 | 2817 | warn = False |
|
2819 | 2818 | |
|
2820 | 2819 | subs = sorted(wctx.substate) |
|
2821 | 2820 | progress = ui.makeprogress( |
|
2822 | 2821 | _(b'searching'), total=len(subs), unit=_(b'subrepos') |
|
2823 | 2822 | ) |
|
2824 | 2823 | for subpath in subs: |
|
2825 | 2824 | submatch = matchmod.subdirmatcher(subpath, m) |
|
2826 | 2825 | subprefix = repo.wvfs.reljoin(prefix, subpath) |
|
2827 | 2826 | subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn) |
|
2828 | 2827 | if subrepos or m.exact(subpath) or any(submatch.files()): |
|
2829 | 2828 | progress.increment() |
|
2830 | 2829 | sub = wctx.sub(subpath) |
|
2831 | 2830 | try: |
|
2832 | 2831 | if sub.removefiles( |
|
2833 | 2832 | submatch, |
|
2834 | 2833 | subprefix, |
|
2835 | 2834 | subuipathfn, |
|
2836 | 2835 | after, |
|
2837 | 2836 | force, |
|
2838 | 2837 | subrepos, |
|
2839 | 2838 | dryrun, |
|
2840 | 2839 | warnings, |
|
2841 | 2840 | ): |
|
2842 | 2841 | ret = 1 |
|
2843 | 2842 | except error.LookupError: |
|
2844 | 2843 | warnings.append( |
|
2845 | 2844 | _(b"skipping missing subrepository: %s\n") |
|
2846 | 2845 | % uipathfn(subpath) |
|
2847 | 2846 | ) |
|
2848 | 2847 | progress.complete() |
|
2849 | 2848 | |
|
2850 | 2849 | # warn about failure to delete explicit files/dirs |
|
2851 | 2850 | deleteddirs = pathutil.dirs(deleted) |
|
2852 | 2851 | files = m.files() |
|
2853 | 2852 | progress = ui.makeprogress( |
|
2854 | 2853 | _(b'deleting'), total=len(files), unit=_(b'files') |
|
2855 | 2854 | ) |
|
2856 | 2855 | for f in files: |
|
2857 | 2856 | |
|
2858 | 2857 | def insubrepo(): |
|
2859 | 2858 | for subpath in wctx.substate: |
|
2860 | 2859 | if f.startswith(subpath + b'/'): |
|
2861 | 2860 | return True |
|
2862 | 2861 | return False |
|
2863 | 2862 | |
|
2864 | 2863 | progress.increment() |
|
2865 | 2864 | isdir = f in deleteddirs or wctx.hasdir(f) |
|
2866 | 2865 | if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs: |
|
2867 | 2866 | continue |
|
2868 | 2867 | |
|
2869 | 2868 | if repo.wvfs.exists(f): |
|
2870 | 2869 | if repo.wvfs.isdir(f): |
|
2871 | 2870 | warnings.append( |
|
2872 | 2871 | _(b'not removing %s: no tracked files\n') % uipathfn(f) |
|
2873 | 2872 | ) |
|
2874 | 2873 | else: |
|
2875 | 2874 | warnings.append( |
|
2876 | 2875 | _(b'not removing %s: file is untracked\n') % uipathfn(f) |
|
2877 | 2876 | ) |
|
2878 | 2877 | # missing files will generate a warning elsewhere |
|
2879 | 2878 | ret = 1 |
|
2880 | 2879 | progress.complete() |
|
2881 | 2880 | |
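# Classify what will actually be deleted: --force takes every matched
# file, --after keeps only files already missing from disk, and the
# default case takes deleted plus clean files while warning about
# modified or added ones.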
|
2882 | 2881 | if force: |
|
2883 | 2882 | list = modified + deleted + clean + added |
|
2884 | 2883 | elif after: |
|
2885 | 2884 | list = deleted |
|
2886 | 2885 | remaining = modified + added + clean |
|
2887 | 2886 | progress = ui.makeprogress( |
|
2888 | 2887 | _(b'skipping'), total=len(remaining), unit=_(b'files') |
|
2889 | 2888 | ) |
|
2890 | 2889 | for f in remaining: |
|
2891 | 2890 | progress.increment() |
|
2892 | 2891 | if ui.verbose or (f in files): |
|
2893 | 2892 | warnings.append( |
|
2894 | 2893 | _(b'not removing %s: file still exists\n') % uipathfn(f) |
|
2895 | 2894 | ) |
|
2896 | 2895 | ret = 1 |
|
2897 | 2896 | progress.complete() |
|
2898 | 2897 | else: |
|
2899 | 2898 | list = deleted + clean |
|
2900 | 2899 | progress = ui.makeprogress( |
|
2901 | 2900 | _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files') |
|
2902 | 2901 | ) |
|
2903 | 2902 | for f in modified: |
|
2904 | 2903 | progress.increment() |
|
2905 | 2904 | warnings.append( |
|
2906 | 2905 | _( |
|
2907 | 2906 | b'not removing %s: file is modified (use -f' |
|
2908 | 2907 | b' to force removal)\n' |
|
2909 | 2908 | ) |
|
2910 | 2909 | % uipathfn(f) |
|
2911 | 2910 | ) |
|
2912 | 2911 | ret = 1 |
|
2913 | 2912 | for f in added: |
|
2914 | 2913 | progress.increment() |
|
2915 | 2914 | warnings.append( |
|
2916 | 2915 | _( |
|
2917 | 2916 | b"not removing %s: file has been marked for add" |
|
2918 | 2917 | b" (use 'hg forget' to undo add)\n" |
|
2919 | 2918 | ) |
|
2920 | 2919 | % uipathfn(f) |
|
2921 | 2920 | ) |
|
2922 | 2921 | ret = 1 |
|
2923 | 2922 | progress.complete() |
|
2924 | 2923 | |
|
2925 | 2924 | list = sorted(list) |
|
2926 | 2925 | progress = ui.makeprogress( |
|
2927 | 2926 | _(b'deleting'), total=len(list), unit=_(b'files') |
|
2928 | 2927 | ) |
|
2929 | 2928 | for f in list: |
|
2930 | 2929 | if ui.verbose or not m.exact(f): |
|
2931 | 2930 | progress.increment() |
|
2932 | 2931 | ui.status( |
|
2933 | 2932 | _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed' |
|
2934 | 2933 | ) |
|
2935 | 2934 | progress.complete() |
|
2936 | 2935 | |
|
2937 | 2936 | if not dryrun: |
|
2938 | 2937 | with repo.wlock(): |
|
2939 | 2938 | if not after: |
|
2940 | 2939 | for f in list: |
|
2941 | 2940 | if f in added: |
|
2942 | 2941 | continue # we never unlink added files on remove |
|
2943 | 2942 | rmdir = repo.ui.configbool( |
|
2944 | 2943 | b'experimental', b'removeemptydirs' |
|
2945 | 2944 | ) |
|
2946 | 2945 | repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir) |
|
2947 | 2946 | repo[None].forget(list) |
|
2948 | 2947 | |
|
2949 | 2948 | if warn: |
|
2950 | 2949 | for warning in warnings: |
|
2951 | 2950 | ui.warn(warning) |
|
2952 | 2951 | |
|
2953 | 2952 | return ret |
|
2954 | 2953 | |
|
2955 | 2954 | |
|
2956 | 2955 | def _catfmtneedsdata(fm): |
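# datahint() reports which fields the formatter's template actually
# references; an empty hint means we cannot prove b'data' is unused,
# so we err on the side of fetching it.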
|
2957 | 2956 | return not fm.datahint() or b'data' in fm.datahint() |
|
2958 | 2957 | |
|
2959 | 2958 | |
|
2960 | 2959 | def _updatecatformatter(fm, ctx, matcher, path, decode): |
|
2961 | 2960 | """Hook for adding data to the formatter used by ``hg cat``. |
|
2962 | 2961 | |
|
2963 | 2962 | Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call |
|
2964 | 2963 | this method first.""" |
|
2965 | 2964 | |
|
2966 | 2965 | # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it |
|
2967 | 2966 | # wasn't requested. |
|
2968 | 2967 | data = b'' |
|
2969 | 2968 | if _catfmtneedsdata(fm): |
|
2970 | 2969 | data = ctx[path].data() |
|
2971 | 2970 | if decode: |
|
2972 | 2971 | data = ctx.repo().wwritedata(path, data) |
|
2973 | 2972 | fm.startitem() |
|
2974 | 2973 | fm.context(ctx=ctx) |
|
2975 | 2974 | fm.write(b'data', b'%s', data) |
|
2976 | 2975 | fm.data(path=path) |
|
2977 | 2976 | |
|
2978 | 2977 | |
|
2979 | 2978 | def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts): |
|
2980 | 2979 | err = 1 |
|
2981 | 2980 | opts = pycompat.byteskwargs(opts) |
|
2982 | 2981 | |
|
2983 | 2982 | def write(path): |
|
2984 | 2983 | filename = None |
|
2985 | 2984 | if fntemplate: |
|
2986 | 2985 | filename = makefilename( |
|
2987 | 2986 | ctx, fntemplate, pathname=os.path.join(prefix, path) |
|
2988 | 2987 | ) |
|
2989 | 2988 | # attempt to create the directory if it does not already exist |
|
2990 | 2989 | try: |
|
2991 | 2990 | os.makedirs(os.path.dirname(filename)) |
|
2992 | 2991 | except OSError: |
|
2993 | 2992 | pass |
|
2994 | 2993 | with formatter.maybereopen(basefm, filename) as fm: |
|
2995 | 2994 | _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode')) |
|
2996 | 2995 | |
|
2997 | 2996 | # Automation often uses hg cat on single files, so special case it |
|
2998 | 2997 | # for performance to avoid the cost of parsing the manifest. |
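# (e.g. `hg cat -r . README` takes this path, while pattern arguments
# such as `hg cat 'glob:*.py'` do not, since anypats() is then true)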
|
2999 | 2998 | if len(matcher.files()) == 1 and not matcher.anypats(): |
|
3000 | 2999 | file = matcher.files()[0] |
|
3001 | 3000 | mfl = repo.manifestlog |
|
3002 | 3001 | mfnode = ctx.manifestnode() |
|
3003 | 3002 | try: |
|
3004 | 3003 | if mfnode and mfl[mfnode].find(file)[0]: |
|
3005 | 3004 | if _catfmtneedsdata(basefm): |
|
3006 | 3005 | scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)]) |
|
3007 | 3006 | write(file) |
|
3008 | 3007 | return 0 |
|
3009 | 3008 | except KeyError: |
|
3010 | 3009 | pass |
|
3011 | 3010 | |
|
3012 | 3011 | if _catfmtneedsdata(basefm): |
|
3013 | 3012 | scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)]) |
|
3014 | 3013 | |
|
3015 | 3014 | for abs in ctx.walk(matcher): |
|
3016 | 3015 | write(abs) |
|
3017 | 3016 | err = 0 |
|
3018 | 3017 | |
|
3019 | 3018 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) |
|
3020 | 3019 | for subpath in sorted(ctx.substate): |
|
3021 | 3020 | sub = ctx.sub(subpath) |
|
3022 | 3021 | try: |
|
3023 | 3022 | submatch = matchmod.subdirmatcher(subpath, matcher) |
|
3024 | 3023 | subprefix = os.path.join(prefix, subpath) |
|
3025 | 3024 | if not sub.cat( |
|
3026 | 3025 | submatch, |
|
3027 | 3026 | basefm, |
|
3028 | 3027 | fntemplate, |
|
3029 | 3028 | subprefix, |
|
3030 | 3029 | **pycompat.strkwargs(opts) |
|
3031 | 3030 | ): |
|
3032 | 3031 | err = 0 |
|
3033 | 3032 | except error.RepoLookupError: |
|
3034 | 3033 | ui.status( |
|
3035 | 3034 | _(b"skipping missing subrepository: %s\n") % uipathfn(subpath) |
|
3036 | 3035 | ) |
|
3037 | 3036 | |
|
3038 | 3037 | return err |
|
3039 | 3038 | |
|
3040 | 3039 | |
|
3041 | 3040 | def commit(ui, repo, commitfunc, pats, opts): |
|
3042 | 3041 | '''commit the specified files or all outstanding changes''' |
|
3043 | 3042 | date = opts.get(b'date') |
|
3044 | 3043 | if date: |
|
3045 | 3044 | opts[b'date'] = dateutil.parsedate(date) |
|
3046 | 3045 | message = logmessage(ui, opts) |
|
3047 | 3046 | matcher = scmutil.match(repo[None], pats, opts) |
|
3048 | 3047 | |
|
3049 | 3048 | dsguard = None |
|
3050 | 3049 | # extract addremove carefully -- this function can be called from a command |
|
3051 | 3050 | # that doesn't support addremove |
|
3052 | 3051 | if opts.get(b'addremove'): |
|
3053 | 3052 | dsguard = dirstateguard.dirstateguard(repo, b'commit') |
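# The guard backs up the dirstate so the addremove can be rolled
# back if the commit itself fails partway through.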
|
3054 | 3053 | with dsguard or util.nullcontextmanager(): |
|
3055 | 3054 | if dsguard: |
|
3056 | 3055 | relative = scmutil.anypats(pats, opts) |
|
3057 | 3056 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative) |
|
3058 | 3057 | if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0: |
|
3059 | 3058 | raise error.Abort( |
|
3060 | 3059 | _(b"failed to mark all new/missing files as added/removed") |
|
3061 | 3060 | ) |
|
3062 | 3061 | |
|
3063 | 3062 | return commitfunc(ui, repo, message, matcher, opts) |
|
3064 | 3063 | |
|
3065 | 3064 | |
|
3066 | 3065 | def samefile(f, ctx1, ctx2): |
|
3067 | 3066 | if f in ctx1.manifest(): |
|
3068 | 3067 | a = ctx1.filectx(f) |
|
3069 | 3068 | if f in ctx2.manifest(): |
|
3070 | 3069 | b = ctx2.filectx(f) |
|
3071 | 3070 | return not a.cmp(b) and a.flags() == b.flags() |
|
3072 | 3071 | else: |
|
3073 | 3072 | return False |
|
3074 | 3073 | else: |
|
3075 | 3074 | return f not in ctx2.manifest() |
|
3076 | 3075 | |
|
3077 | 3076 | |
|
3078 | 3077 | def amend(ui, repo, old, extra, pats, opts): |
|
3079 | 3078 | # avoid cycle context -> subrepo -> cmdutil |
|
3080 | 3079 | from . import context |
|
3081 | 3080 | |
|
3082 | 3081 | # amend will reuse the existing user if not specified, but the obsolete |
|
3083 | 3082 | # marker creation requires that the current user's name is specified. |
|
3084 | 3083 | if obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
3085 | 3084 | ui.username() # raise exception if username not set |
|
3086 | 3085 | |
|
3087 | 3086 | ui.note(_(b'amending changeset %s\n') % old) |
|
3088 | 3087 | base = old.p1() |
|
3089 | 3088 | |
|
3090 | 3089 | with repo.wlock(), repo.lock(), repo.transaction(b'amend'): |
|
3091 | 3090 | # Participating changesets: |
|
3092 | 3091 | # |
|
3093 | 3092 | # wctx o - workingctx that contains changes from working copy |
|
3094 | 3093 | # | to go into amending commit |
|
3095 | 3094 | # | |
|
3096 | 3095 | # old o - changeset to amend |
|
3097 | 3096 | # | |
|
3098 | 3097 | # base o - first parent of the changeset to amend |
|
3099 | 3098 | wctx = repo[None] |
|
3100 | 3099 | |
|
3101 | 3100 | # Copy to avoid mutating input |
|
3102 | 3101 | extra = extra.copy() |
|
3103 | 3102 | # Update extra dict from amended commit (e.g. to preserve graft |
|
3104 | 3103 | # source) |
|
3105 | 3104 | extra.update(old.extra()) |
|
3106 | 3105 | |
|
3107 | 3106 | # Also update it from the wctx
|
3108 | 3107 | extra.update(wctx.extra()) |
|
3109 | 3108 | |
|
3110 | 3109 | # date-only change should be ignored? |
|
3111 | 3110 | datemaydiffer = resolvecommitoptions(ui, opts) |
|
3112 | 3111 | |
|
3113 | 3112 | date = old.date() |
|
3114 | 3113 | if opts.get(b'date'): |
|
3115 | 3114 | date = dateutil.parsedate(opts.get(b'date')) |
|
3116 | 3115 | user = opts.get(b'user') or old.user() |
|
3117 | 3116 | |
|
3118 | 3117 | if len(old.parents()) > 1: |
|
3119 | 3118 | # ctx.files() isn't reliable for merges, so fall back to the |
|
3120 | 3119 | # slower repo.status() method |
|
3121 | 3120 | st = base.status(old) |
|
3122 | 3121 | files = set(st.modified) | set(st.added) | set(st.removed) |
|
3123 | 3122 | else: |
|
3124 | 3123 | files = set(old.files()) |
|
3125 | 3124 | |
|
3126 | 3125 | # add/remove the files to the working copy if the "addremove" option |
|
3127 | 3126 | # was specified. |
|
3128 | 3127 | matcher = scmutil.match(wctx, pats, opts) |
|
3129 | 3128 | relative = scmutil.anypats(pats, opts) |
|
3130 | 3129 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative) |
|
3131 | 3130 | if opts.get(b'addremove') and scmutil.addremove( |
|
3132 | 3131 | repo, matcher, b"", uipathfn, opts |
|
3133 | 3132 | ): |
|
3134 | 3133 | raise error.Abort( |
|
3135 | 3134 | _(b"failed to mark all new/missing files as added/removed") |
|
3136 | 3135 | ) |
|
3137 | 3136 | |
|
3138 | 3137 | # Check subrepos. This depends on in-place wctx._status update in |
|
3139 | 3138 | # subrepo.precommit(). To minimize the risk of this hack, we do |
|
3140 | 3139 | # nothing if .hgsub does not exist. |
|
3141 | 3140 | if b'.hgsub' in wctx or b'.hgsub' in old: |
|
3142 | 3141 | subs, commitsubs, newsubstate = subrepoutil.precommit( |
|
3143 | 3142 | ui, wctx, wctx._status, matcher |
|
3144 | 3143 | ) |
|
3145 | 3144 | # amend should abort if commitsubrepos is enabled |
|
3146 | 3145 | assert not commitsubs |
|
3147 | 3146 | if subs: |
|
3148 | 3147 | subrepoutil.writestate(repo, newsubstate) |
|
3149 | 3148 | |
|
3150 | 3149 | ms = mergestatemod.mergestate.read(repo) |
|
3151 | 3150 | mergeutil.checkunresolved(ms) |
|
3152 | 3151 | |
|
3153 | 3152 | filestoamend = {f for f in wctx.files() if matcher(f)} |
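# Only working-copy changes matching the given patterns take part in
# the amend; everything else keeps its content from the old changeset
# (see filectxfn below).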
|
3154 | 3153 | |
|
3155 | 3154 | changes = len(filestoamend) > 0 |
|
3156 | 3155 | if changes: |
|
3157 | 3156 | # Recompute copies (avoid recording a -> b -> a) |
|
3158 | 3157 | copied = copies.pathcopies(base, wctx, matcher) |
|
3159 | 3158 | if old.p2():
|
3160 | 3159 | copied.update(copies.pathcopies(old.p2(), wctx, matcher)) |
|
3161 | 3160 | |
|
3162 | 3161 | # Prune files which were reverted by the updates: if old |
|
3163 | 3162 | # introduced file X and the file was renamed in the working |
|
3164 | 3163 | # copy, then those two files are the same and |
|
3165 | 3164 | # we can discard X from our list of files. Likewise if X |
|
3166 | 3165 | # was removed, it's no longer relevant. If X is missing (aka |
|
3167 | 3166 | # deleted), old X must be preserved. |
|
3168 | 3167 | files.update(filestoamend) |
|
3169 | 3168 | files = [ |
|
3170 | 3169 | f |
|
3171 | 3170 | for f in files |
|
3172 | 3171 | if (f not in filestoamend or not samefile(f, wctx, base)) |
|
3173 | 3172 | ] |
|
3174 | 3173 | |
|
3175 | 3174 | def filectxfn(repo, ctx_, path): |
|
3176 | 3175 | try: |
|
3177 | 3176 | # If the file being considered is not amongst the files |
|
3178 | 3177 | # to be amended, we should return the file context from the |
|
3179 | 3178 | # old changeset. This avoids issues when only some files in |
|
3180 | 3179 | # the working copy are being amended but there are also |
|
3181 | 3180 | # changes to other files from the old changeset. |
|
3182 | 3181 | if path not in filestoamend: |
|
3183 | 3182 | return old.filectx(path) |
|
3184 | 3183 | |
|
3185 | 3184 | # Return None for removed files. |
|
3186 | 3185 | if path in wctx.removed(): |
|
3187 | 3186 | return None |
|
3188 | 3187 | |
|
3189 | 3188 | fctx = wctx[path] |
|
3190 | 3189 | flags = fctx.flags() |
|
3191 | 3190 | mctx = context.memfilectx( |
|
3192 | 3191 | repo, |
|
3193 | 3192 | ctx_, |
|
3194 | 3193 | fctx.path(), |
|
3195 | 3194 | fctx.data(), |
|
3196 | 3195 | islink=b'l' in flags, |
|
3197 | 3196 | isexec=b'x' in flags, |
|
3198 | 3197 | copysource=copied.get(path), |
|
3199 | 3198 | ) |
|
3200 | 3199 | return mctx |
|
3201 | 3200 | except KeyError: |
|
3202 | 3201 | return None |
|
3203 | 3202 | |
|
3204 | 3203 | else: |
|
3205 | 3204 | ui.note(_(b'copying changeset %s to %s\n') % (old, base)) |
|
3206 | 3205 | |
|
3207 | 3206 | # Use version of files as in the old cset |
|
3208 | 3207 | def filectxfn(repo, ctx_, path): |
|
3209 | 3208 | try: |
|
3210 | 3209 | return old.filectx(path) |
|
3211 | 3210 | except KeyError: |
|
3212 | 3211 | return None |
|
3213 | 3212 | |
|
3214 | 3213 | # See if we got a message from -m or -l, if not, open the editor with |
|
3215 | 3214 | # the message of the changeset to amend. |
|
3216 | 3215 | message = logmessage(ui, opts) |
|
3217 | 3216 | |
|
3218 | 3217 | editform = mergeeditform(old, b'commit.amend') |
|
3219 | 3218 | |
|
3220 | 3219 | if not message: |
|
3221 | 3220 | message = old.description() |
|
3222 | 3221 | # Default if message isn't provided and --edit is not passed is to |
|
3223 | 3222 | # invoke editor, but allow --no-edit. If somehow we don't have any |
|
3224 | 3223 | # description, let's always start the editor. |
|
3225 | 3224 | doedit = not message or opts.get(b'edit') in [True, None] |
|
3226 | 3225 | else: |
|
3227 | 3226 | # Default if message is provided is to not invoke editor, but allow |
|
3228 | 3227 | # --edit. |
|
3229 | 3228 | doedit = opts.get(b'edit') is True |
|
3230 | 3229 | editor = getcommiteditor(edit=doedit, editform=editform) |
|
3231 | 3230 | |
|
3232 | 3231 | pureextra = extra.copy() |
|
3233 | 3232 | extra[b'amend_source'] = old.hex() |
|
3234 | 3233 | |
|
3235 | 3234 | new = context.memctx( |
|
3236 | 3235 | repo, |
|
3237 | 3236 | parents=[base.node(), old.p2().node()], |
|
3238 | 3237 | text=message, |
|
3239 | 3238 | files=files, |
|
3240 | 3239 | filectxfn=filectxfn, |
|
3241 | 3240 | user=user, |
|
3242 | 3241 | date=date, |
|
3243 | 3242 | extra=extra, |
|
3244 | 3243 | editor=editor, |
|
3245 | 3244 | ) |
|
3246 | 3245 | |
|
3247 | 3246 | newdesc = changelog.stripdesc(new.description()) |
|
3248 | 3247 | if ( |
|
3249 | 3248 | (not changes) |
|
3250 | 3249 | and newdesc == old.description() |
|
3251 | 3250 | and user == old.user() |
|
3252 | 3251 | and (date == old.date() or datemaydiffer) |
|
3253 | 3252 | and pureextra == old.extra() |
|
3254 | 3253 | ): |
|
3255 | 3254 | # nothing changed. continuing here would create a new node |
|
3256 | 3255 | # anyway because of the amend_source noise. |
|
3257 | 3256 | # |
|
3258 | 3257 | # This is not what we expect from amend.
|
3259 | 3258 | return old.node() |
|
3260 | 3259 | |
|
3261 | 3260 | commitphase = None |
|
3262 | 3261 | if opts.get(b'secret'): |
|
3263 | 3262 | commitphase = phases.secret |
|
3264 | 3263 | newid = repo.commitctx(new) |
|
3265 | 3264 | ms.reset() |
|
3266 | 3265 | |
|
3267 | 3266 | # Reroute the working copy parent to the new changeset |
|
3268 | 3267 | repo.setparents(newid, nullid) |
|
3269 | 3268 | mapping = {old.node(): (newid,)} |
|
3270 | 3269 | obsmetadata = None |
|
3271 | 3270 | if opts.get(b'note'): |
|
3272 | 3271 | obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])} |
|
3273 | 3272 | backup = ui.configbool(b'rewrite', b'backup-bundle') |
|
3274 | 3273 | scmutil.cleanupnodes( |
|
3275 | 3274 | repo, |
|
3276 | 3275 | mapping, |
|
3277 | 3276 | b'amend', |
|
3278 | 3277 | metadata=obsmetadata, |
|
3279 | 3278 | fixphase=True, |
|
3280 | 3279 | targetphase=commitphase, |
|
3281 | 3280 | backup=backup, |
|
3282 | 3281 | ) |
|
3283 | 3282 | |
|
3284 | 3283 | # Fixing the dirstate because localrepo.commitctx does not update |
|
3285 | 3284 | # it. This is rather convenient because we did not need to update |
|
3286 | 3285 | # the dirstate for all the files in the new commit which commitctx |
|
3287 | 3286 | # could have done if it updated the dirstate. Now, we can |
|
3288 | 3287 | # selectively update the dirstate only for the amended files. |
|
3289 | 3288 | dirstate = repo.dirstate |
|
3290 | 3289 | |
|
3291 | 3290 | # Update the state of the files which were added and modified in the |
|
3292 | 3291 | # amend to "normal" in the dirstate. We need to use "normallookup" since |
|
3293 | 3292 | # the files may have changed since the command started; using "normal" |
|
3294 | 3293 | # would mark them as clean but with uncommitted contents. |
|
3295 | 3294 | normalfiles = set(wctx.modified() + wctx.added()) & filestoamend |
|
3296 | 3295 | for f in normalfiles: |
|
3297 | 3296 | dirstate.normallookup(f) |
|
3298 | 3297 | |
|
3299 | 3298 | # Update the state of files which were removed in the amend |
|
3300 | 3299 | # to "removed" in the dirstate. |
|
3301 | 3300 | removedfiles = set(wctx.removed()) & filestoamend |
|
3302 | 3301 | for f in removedfiles: |
|
3303 | 3302 | dirstate.drop(f) |
|
3304 | 3303 | |
|
3305 | 3304 | return newid |
|
3306 | 3305 | |
|
3307 | 3306 | |
|
3308 | 3307 | def commiteditor(repo, ctx, subs, editform=b''): |
|
3309 | 3308 | if ctx.description(): |
|
3310 | 3309 | return ctx.description() |
|
3311 | 3310 | return commitforceeditor( |
|
3312 | 3311 | repo, ctx, subs, editform=editform, unchangedmessagedetection=True |
|
3313 | 3312 | ) |
|
3314 | 3313 | |
|
3315 | 3314 | |
|
3316 | 3315 | def commitforceeditor( |
|
3317 | 3316 | repo, |
|
3318 | 3317 | ctx, |
|
3319 | 3318 | subs, |
|
3320 | 3319 | finishdesc=None, |
|
3321 | 3320 | extramsg=None, |
|
3322 | 3321 | editform=b'', |
|
3323 | 3322 | unchangedmessagedetection=False, |
|
3324 | 3323 | ): |
|
3325 | 3324 | if not extramsg: |
|
3326 | 3325 | extramsg = _(b"Leave message empty to abort commit.") |
|
3327 | 3326 | |
|
3328 | 3327 | forms = [e for e in editform.split(b'.') if e] |
|
3329 | 3328 | forms.insert(0, b'changeset') |
|
3330 | 3329 | templatetext = None |
|
3331 | 3330 | while forms: |
|
3332 | 3331 | ref = b'.'.join(forms) |
|
3333 | 3332 | if repo.ui.config(b'committemplate', ref): |
|
3334 | 3333 | templatetext = committext = buildcommittemplate( |
|
3335 | 3334 | repo, ctx, subs, extramsg, ref |
|
3336 | 3335 | ) |
|
3337 | 3336 | break |
|
3338 | 3337 | forms.pop() |
|
3339 | 3338 | else: |
|
3340 | 3339 | committext = buildcommittext(repo, ctx, subs, extramsg) |
|
3341 | 3340 | |
|
3342 | 3341 | # run editor in the repository root |
|
3343 | 3342 | olddir = encoding.getcwd() |
|
3344 | 3343 | os.chdir(repo.root) |
|
3345 | 3344 | |
|
3346 | 3345 | # make in-memory changes visible to external process |
|
3347 | 3346 | tr = repo.currenttransaction() |
|
3348 | 3347 | repo.dirstate.write(tr) |
|
3349 | 3348 | pending = tr and tr.writepending() and repo.root |
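# `pending` is the repo root while a transaction has pending writes
# (and falsy otherwise); ui.edit() passes it along so the spawned
# editor and its hooks can see the uncommitted state (HG_PENDING).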
|
3350 | 3349 | |
|
3351 | 3350 | editortext = repo.ui.edit( |
|
3352 | 3351 | committext, |
|
3353 | 3352 | ctx.user(), |
|
3354 | 3353 | ctx.extra(), |
|
3355 | 3354 | editform=editform, |
|
3356 | 3355 | pending=pending, |
|
3357 | 3356 | repopath=repo.path, |
|
3358 | 3357 | action=b'commit', |
|
3359 | 3358 | ) |
|
3360 | 3359 | text = editortext |
|
3361 | 3360 | |
|
3362 | 3361 | # strip away anything below this special string (used for editors that want |
|
3363 | 3362 | # to display the diff) |
|
3364 | 3363 | stripbelow = re.search(_linebelow, text, flags=re.MULTILINE) |
|
3365 | 3364 | if stripbelow: |
|
3366 | 3365 | text = text[: stripbelow.start()] |
|
3367 | 3366 | |
|
3368 | 3367 | text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text) |
|
3369 | 3368 | os.chdir(olddir) |
|
3370 | 3369 | |
|
3371 | 3370 | if finishdesc: |
|
3372 | 3371 | text = finishdesc(text) |
|
3373 | 3372 | if not text.strip(): |
|
3374 | 3373 | raise error.Abort(_(b"empty commit message")) |
|
3375 | 3374 | if unchangedmessagedetection and editortext == templatetext: |
|
3376 | 3375 | raise error.Abort(_(b"commit message unchanged")) |
|
3377 | 3376 | |
|
3378 | 3377 | return text |
|
3379 | 3378 | |
|
3380 | 3379 | |
|
3381 | 3380 | def buildcommittemplate(repo, ctx, subs, extramsg, ref): |
|
3382 | 3381 | ui = repo.ui |
|
3383 | 3382 | spec = formatter.reference_templatespec(ref) |
|
3384 | 3383 | t = logcmdutil.changesettemplater(ui, repo, spec) |
|
3385 | 3384 | t.t.cache.update( |
|
3386 | 3385 | (k, templater.unquotestring(v)) |
|
3387 | 3386 | for k, v in repo.ui.configitems(b'committemplate') |
|
3388 | 3387 | ) |
|
3389 | 3388 | |
|
3390 | 3389 | if not extramsg: |
|
3391 | 3390 | extramsg = b'' # ensure that extramsg is string |
|
3392 | 3391 | |
|
3393 | 3392 | ui.pushbuffer() |
|
3394 | 3393 | t.show(ctx, extramsg=extramsg) |
|
3395 | 3394 | return ui.popbuffer() |
|
3396 | 3395 | |
|
3397 | 3396 | |
|
3398 | 3397 | def hgprefix(msg): |
|
3399 | 3398 | return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a]) |
|
3400 | 3399 | |
|
3401 | 3400 | |
|
3402 | 3401 | def buildcommittext(repo, ctx, subs, extramsg): |
|
3403 | 3402 | edittext = [] |
|
3404 | 3403 | modified, added, removed = ctx.modified(), ctx.added(), ctx.removed() |
|
3405 | 3404 | if ctx.description(): |
|
3406 | 3405 | edittext.append(ctx.description()) |
|
3407 | 3406 | edittext.append(b"") |
|
3408 | 3407 | edittext.append(b"") # Empty line between message and comments. |
|
3409 | 3408 | edittext.append( |
|
3410 | 3409 | hgprefix( |
|
3411 | 3410 | _( |
|
3412 | 3411 | b"Enter commit message." |
|
3413 | 3412 | b" Lines beginning with 'HG:' are removed." |
|
3414 | 3413 | ) |
|
3415 | 3414 | ) |
|
3416 | 3415 | ) |
|
3417 | 3416 | edittext.append(hgprefix(extramsg)) |
|
3418 | 3417 | edittext.append(b"HG: --") |
|
3419 | 3418 | edittext.append(hgprefix(_(b"user: %s") % ctx.user())) |
|
3420 | 3419 | if ctx.p2(): |
|
3421 | 3420 | edittext.append(hgprefix(_(b"branch merge"))) |
|
3422 | 3421 | if ctx.branch(): |
|
3423 | 3422 | edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch())) |
|
3424 | 3423 | if bookmarks.isactivewdirparent(repo): |
|
3425 | 3424 | edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark)) |
|
3426 | 3425 | edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs]) |
|
3427 | 3426 | edittext.extend([hgprefix(_(b"added %s") % f) for f in added]) |
|
3428 | 3427 | edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified]) |
|
3429 | 3428 | edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed]) |
|
3430 | 3429 | if not added and not modified and not removed: |
|
3431 | 3430 | edittext.append(hgprefix(_(b"no files changed"))) |
|
3432 | 3431 | edittext.append(b"") |
|
3433 | 3432 | |
|
3434 | 3433 | return b"\n".join(edittext) |
|
3435 | 3434 | |
|
3436 | 3435 | |
|
3437 | 3436 | def commitstatus(repo, node, branch, bheads=None, opts=None): |
|
3438 | 3437 | if opts is None: |
|
3439 | 3438 | opts = {} |
|
3440 | 3439 | ctx = repo[node] |
|
3441 | 3440 | parents = ctx.parents() |
|
3442 | 3441 | |
|
3443 | 3442 | if ( |
|
3444 | 3443 | not opts.get(b'amend') |
|
3445 | 3444 | and bheads |
|
3446 | 3445 | and node not in bheads |
|
3447 | 3446 | and not any( |
|
3448 | 3447 | p.node() in bheads and p.branch() == branch for p in parents |
|
3449 | 3448 | ) |
|
3450 | 3449 | ): |
|
3451 | 3450 | repo.ui.status(_(b'created new head\n')) |
|
3452 | 3451 | # The message is not printed for initial roots. For the other |
|
3453 | 3452 | # changesets, it is printed in the following situations: |
|
3454 | 3453 | # |
|
3455 | 3454 | # Par column: for the 2 parents with ... |
|
3456 | 3455 | # N: null or no parent |
|
3457 | 3456 | # B: parent is on another named branch |
|
3458 | 3457 | # C: parent is a regular non head changeset |
|
3459 | 3458 | # H: parent was a branch head of the current branch |
|
3460 | 3459 | # Msg column: whether we print "created new head" message |
|
3461 | 3460 | # In the following, it is assumed that there already exist some
|
3462 | 3461 | # initial branch heads of the current branch, otherwise nothing is |
|
3463 | 3462 | # printed anyway. |
|
3464 | 3463 | # |
|
3465 | 3464 | # Par Msg Comment |
|
3466 | 3465 | # N N y additional topo root |
|
3467 | 3466 | # |
|
3468 | 3467 | # B N y additional branch root |
|
3469 | 3468 | # C N y additional topo head |
|
3470 | 3469 | # H N n usual case |
|
3471 | 3470 | # |
|
3472 | 3471 | # B B y weird additional branch root |
|
3473 | 3472 | # C B y branch merge |
|
3474 | 3473 | # H B n merge with named branch |
|
3475 | 3474 | # |
|
3476 | 3475 | # C C y additional head from merge |
|
3477 | 3476 | # C H n merge with a head |
|
3478 | 3477 | # |
|
3479 | 3478 | # H H n head merge: head count decreases |
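# For example: with heads H1 and H2 on a branch, committing on top of
# H1 keeps the head count stable (case H/N) and stays quiet, while
# committing on top of an older non-head changeset adds a head
# (case C/N) and prints the message.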
|
3480 | 3479 | |
|
3481 | 3480 | if not opts.get(b'close_branch'): |
|
3482 | 3481 | for r in parents: |
|
3483 | 3482 | if r.closesbranch() and r.branch() == branch: |
|
3484 | 3483 | repo.ui.status( |
|
3485 | 3484 | _(b'reopening closed branch head %d\n') % r.rev() |
|
3486 | 3485 | ) |
|
3487 | 3486 | |
|
3488 | 3487 | if repo.ui.debugflag: |
|
3489 | 3488 | repo.ui.write( |
|
3490 | 3489 | _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()) |
|
3491 | 3490 | ) |
|
3492 | 3491 | elif repo.ui.verbose: |
|
3493 | 3492 | repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx)) |
|
3494 | 3493 | |
|
3495 | 3494 | |
|
3496 | 3495 | def postcommitstatus(repo, pats, opts): |
|
3497 | 3496 | return repo.status(match=scmutil.match(repo[None], pats, opts)) |
|
3498 | 3497 | |
|
3499 | 3498 | |
|
3500 | 3499 | def revert(ui, repo, ctx, *pats, **opts): |
|
3501 | 3500 | opts = pycompat.byteskwargs(opts) |
|
3502 | 3501 | parent, p2 = repo.dirstate.parents() |
|
3503 | 3502 | node = ctx.node() |
|
3504 | 3503 | |
|
3505 | 3504 | mf = ctx.manifest() |
|
3506 | 3505 | if node == p2: |
|
3507 | 3506 | parent = p2 |
|
3508 | 3507 | |
|
3509 | 3508 | # need all matching names in dirstate and manifest of target rev, |
|
3510 | 3509 | # so have to walk both. do not print errors if files exist in one |
|
3511 | 3510 | # but not other. in both cases, filesets should be evaluated against |
|
3512 | 3511 | # workingctx to get consistent result (issue4497). this means 'set:**' |
|
3513 | 3512 | # cannot be used to select missing files from target rev. |
|
3514 | 3513 | |
|
3515 | 3514 | # `names` is a mapping for all elements in working copy and target revision |
|
3516 | 3515 | # The mapping is in the form: |
|
3517 | 3516 | # <abs path in repo> -> <exactly specified by matcher?>
|
3518 | 3517 | names = {} |
|
3519 | 3518 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) |
|
3520 | 3519 | |
|
3521 | 3520 | with repo.wlock(): |
|
3522 | 3521 | ## filling of the `names` mapping |
|
3523 | 3522 | # walk dirstate to fill `names` |
|
3524 | 3523 | |
|
3525 | 3524 | interactive = opts.get(b'interactive', False) |
|
3526 | 3525 | wctx = repo[None] |
|
3527 | 3526 | m = scmutil.match(wctx, pats, opts) |
|
3528 | 3527 | |
|
3529 | 3528 | # we'll need this later |
|
3530 | 3529 | targetsubs = sorted(s for s in wctx.substate if m(s)) |
|
3531 | 3530 | |
|
3532 | 3531 | if not m.always(): |
|
3533 | 3532 | matcher = matchmod.badmatch(m, lambda x, y: False) |
|
3534 | 3533 | for abs in wctx.walk(matcher): |
|
3535 | 3534 | names[abs] = m.exact(abs) |
|
3536 | 3535 | |
|
3537 | 3536 | # walk target manifest to fill `names` |
|
3538 | 3537 | |
|
3539 | 3538 | def badfn(path, msg): |
|
3540 | 3539 | if path in names: |
|
3541 | 3540 | return |
|
3542 | 3541 | if path in ctx.substate: |
|
3543 | 3542 | return |
|
3544 | 3543 | path_ = path + b'/' |
|
3545 | 3544 | for f in names: |
|
3546 | 3545 | if f.startswith(path_): |
|
3547 | 3546 | return |
|
3548 | 3547 | ui.warn(b"%s: %s\n" % (uipathfn(path), msg)) |
|
3549 | 3548 | |
|
3550 | 3549 | for abs in ctx.walk(matchmod.badmatch(m, badfn)): |
|
3551 | 3550 | if abs not in names: |
|
3552 | 3551 | names[abs] = m.exact(abs) |
|
3553 | 3552 | |
|
3554 | 3553 | # Find the status of all files in `names`.
|
3555 | 3554 | m = scmutil.matchfiles(repo, names) |
|
3556 | 3555 | |
|
3557 | 3556 | changes = repo.status( |
|
3558 | 3557 | node1=node, match=m, unknown=True, ignored=True, clean=True |
|
3559 | 3558 | ) |
|
3560 | 3559 | else: |
|
3561 | 3560 | changes = repo.status(node1=node, match=m) |
|
3562 | 3561 | for kind in changes: |
|
3563 | 3562 | for abs in kind: |
|
3564 | 3563 | names[abs] = m.exact(abs) |
|
3565 | 3564 | |
|
3566 | 3565 | m = scmutil.matchfiles(repo, names) |
|
3567 | 3566 | |
|
3568 | 3567 | modified = set(changes.modified) |
|
3569 | 3568 | added = set(changes.added) |
|
3570 | 3569 | removed = set(changes.removed) |
|
3571 | 3570 | _deleted = set(changes.deleted) |
|
3572 | 3571 | unknown = set(changes.unknown) |
|
3573 | 3572 | unknown.update(changes.ignored) |
|
3574 | 3573 | clean = set(changes.clean) |
|
3575 | 3574 | modadded = set() |
|
3576 | 3575 | |
|
3577 | 3576 | # We need to account for the state of the file in the dirstate,

3578 | 3577 | # even when we revert against something other than the parent. This

3579 | 3578 | # will slightly alter the behavior of revert (doing a backup or not,

3580 | 3579 | # deleting or just forgetting, etc.).
|
3581 | 3580 | if parent == node: |
|
3582 | 3581 | dsmodified = modified |
|
3583 | 3582 | dsadded = added |
|
3584 | 3583 | dsremoved = removed |
|
3585 | 3584 | # store all local modifications, useful later for rename detection |
|
3586 | 3585 | localchanges = dsmodified | dsadded |
|
3587 | 3586 | modified, added, removed = set(), set(), set() |
|
3588 | 3587 | else: |
|
3589 | 3588 | changes = repo.status(node1=parent, match=m) |
|
3590 | 3589 | dsmodified = set(changes.modified) |
|
3591 | 3590 | dsadded = set(changes.added) |
|
3592 | 3591 | dsremoved = set(changes.removed) |
|
3593 | 3592 | # store all local modifications, useful later for rename detection |
|
3594 | 3593 | localchanges = dsmodified | dsadded |
|
3595 | 3594 | |
|
3596 | 3595 | # only take into account for removes between wc and target |
|
3597 | 3596 | clean |= dsremoved - removed |
|
3598 | 3597 | dsremoved &= removed |
|
3600 | 3599 | # distinguish between dirstate remove and other
|
3600 | 3599 | removed -= dsremoved |
|
3601 | 3600 | |
|
3602 | 3601 | modadded = added & dsmodified |
|
3603 | 3602 | added -= modadded |
|
3604 | 3603 | |
|
3606 | 3605 | # tell the newly modified files apart.
|
3606 | 3605 | dsmodified &= modified |
|
3607 | 3606 | dsmodified |= modified & dsadded # dirstate added may need backup |
|
3608 | 3607 | modified -= dsmodified |
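# After this set algebra, `modified` holds files that differ from the
# target but are clean in the working copy, while `dsmodified` holds
# the locally-edited ones that may deserve a backup before reverting.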
|
3609 | 3608 | |
|
3610 | 3609 | # We need to wait for some post-processing to update this set |
|
3611 | 3610 | # before making the distinction. The dirstate will be used for |
|
3612 | 3611 | # that purpose. |
|
3613 | 3612 | dsadded = added |
|
3614 | 3613 | |
|
3615 | 3614 | # in case of merge, files that are actually added can be reported as |
|
3616 | 3615 | # modified, we need to post process the result |
|
3617 | 3616 | if p2 != nullid: |
|
3618 | 3617 | mergeadd = set(dsmodified) |
|
3619 | 3618 | for path in dsmodified: |
|
3620 | 3619 | if path in mf: |
|
3621 | 3620 | mergeadd.remove(path) |
|
3622 | 3621 | dsadded |= mergeadd |
|
3623 | 3622 | dsmodified -= mergeadd |
|
3624 | 3623 | |
|
3625 | 3624 | # if f is a rename, update `names` to also revert the source |
|
3626 | 3625 | for f in localchanges: |
|
3627 | 3626 | src = repo.dirstate.copied(f) |
|
3628 | 3627 | # XXX should we check for rename down to target node? |
|
3629 | 3628 | if src and src not in names and repo.dirstate[src] == b'r': |
|
3630 | 3629 | dsremoved.add(src) |
|
3631 | 3630 | names[src] = True |
|
3632 | 3631 | |
|
3633 | 3632 | # determine the exact nature of the deleted files
|
3634 | 3633 | deladded = set(_deleted) |
|
3635 | 3634 | for path in _deleted: |
|
3636 | 3635 | if path in mf: |
|
3637 | 3636 | deladded.remove(path) |
|
3638 | 3637 | deleted = _deleted - deladded |
|
3639 | 3638 | |
|
3640 | 3639 | # distinguish between the files to forget and the others
|
3641 | 3640 | added = set() |
|
3642 | 3641 | for abs in dsadded: |
|
3643 | 3642 | if repo.dirstate[abs] != b'a': |
|
3644 | 3643 | added.add(abs) |
|
3645 | 3644 | dsadded -= added |
|
3646 | 3645 | |
|
3647 | 3646 | for abs in deladded: |
|
3648 | 3647 | if repo.dirstate[abs] == b'a': |
|
3649 | 3648 | dsadded.add(abs) |
|
3650 | 3649 | deladded -= dsadded |
|
3651 | 3650 | |
|
3652 | 3651 | # For files marked as removed, we check if an unknown file is present at |
|
3653 | 3652 | # the same path. If such a file exists it may need to be backed up.
|
3654 | 3653 | # Making the distinction at this stage helps have simpler backup |
|
3655 | 3654 | # logic. |
|
3656 | 3655 | removunk = set() |
|
3657 | 3656 | for abs in removed: |
|
3658 | 3657 | target = repo.wjoin(abs) |
|
3659 | 3658 | if os.path.lexists(target): |
|
3660 | 3659 | removunk.add(abs) |
|
3661 | 3660 | removed -= removunk |
|
3662 | 3661 | |
|
3663 | 3662 | dsremovunk = set() |
|
3664 | 3663 | for abs in dsremoved: |
|
3665 | 3664 | target = repo.wjoin(abs) |
|
3666 | 3665 | if os.path.lexists(target): |
|
3667 | 3666 | dsremovunk.add(abs) |
|
3668 | 3667 | dsremoved -= dsremovunk |
|
3669 | 3668 | |
|
3670 | 3669 | # action to be actually performed by revert |
|
3671 | 3670 | # (<list of files>, <message>) tuple
|
3672 | 3671 | actions = { |
|
3673 | 3672 | b'revert': ([], _(b'reverting %s\n')), |
|
3674 | 3673 | b'add': ([], _(b'adding %s\n')), |
|
3675 | 3674 | b'remove': ([], _(b'removing %s\n')), |
|
3676 | 3675 | b'drop': ([], _(b'removing %s\n')), |
|
3677 | 3676 | b'forget': ([], _(b'forgetting %s\n')), |
|
3678 | 3677 | b'undelete': ([], _(b'undeleting %s\n')), |
|
3679 | 3678 | b'noop': (None, _(b'no changes needed to %s\n')), |
|
3680 | 3679 | b'unknown': (None, _(b'file not managed: %s\n')), |
|
3681 | 3680 | } |
|
3682 | 3681 | |
|
3683 | 3682 | # "constant" that convey the backup strategy. |
|
3684 | 3683 | # All set to `discard` if `no-backup` is set, to avoid checking
|
3685 | 3684 | # no_backup lower in the code. |
|
3686 | 3685 | # These values are ordered for comparison purposes |
|
3687 | 3686 | backupinteractive = 3 # do backup if interactively modified |
|
3688 | 3687 | backup = 2 # unconditionally do backup |
|
3689 | 3688 | check = 1 # check if the existing file differs from target |
|
3690 | 3689 | discard = 0 # never do backup |
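# The ordering makes comparisons like `backup <= dobackup` below mean
# "this entry requires at least an unconditional backup".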
|
3691 | 3690 | if opts.get(b'no_backup'): |
|
3692 | 3691 | backupinteractive = backup = check = discard |
|
3693 | 3692 | if interactive: |
|
3694 | 3693 | dsmodifiedbackup = backupinteractive |
|
3695 | 3694 | else: |
|
3696 | 3695 | dsmodifiedbackup = backup |
|
3697 | 3696 | tobackup = set() |
|
3698 | 3697 | |
|
3699 | 3698 | backupanddel = actions[b'remove'] |
|
3700 | 3699 | if not opts.get(b'no_backup'): |
|
3701 | 3700 | backupanddel = actions[b'drop'] |
|
3702 | 3701 | |
|
3703 | 3702 | disptable = ( |
|
3704 | 3703 | # dispatch table: |
|
3705 | 3704 | # file state |
|
3706 | 3705 | # action |
|
3707 | 3706 | # make backup |
|
3708 | 3707 | ## Sets whose results will change files on disk
|
3709 | 3708 | # Modified compared to target, no local change |
|
3710 | 3709 | (modified, actions[b'revert'], discard), |
|
3711 | 3710 | # Modified compared to target, but local file is deleted |
|
3712 | 3711 | (deleted, actions[b'revert'], discard), |
|
3713 | 3712 | # Modified compared to target, local change |
|
3714 | 3713 | (dsmodified, actions[b'revert'], dsmodifiedbackup), |
|
3715 | 3714 | # Added since target |
|
3716 | 3715 | (added, actions[b'remove'], discard), |
|
3717 | 3716 | # Added in working directory |
|
3718 | 3717 | (dsadded, actions[b'forget'], discard), |
|
3719 | 3718 | # Added since target, have local modification |
|
3720 | 3719 | (modadded, backupanddel, backup), |
|
3721 | 3720 | # Added since target but file is missing in working directory |
|
3722 | 3721 | (deladded, actions[b'drop'], discard), |
|
3723 | 3722 | # Removed since target, before working copy parent |
|
3724 | 3723 | (removed, actions[b'add'], discard), |
|
3725 | 3724 | # Same as `removed` but an unknown file exists at the same path |
|
3726 | 3725 | (removunk, actions[b'add'], check), |
|
3727 | 3726 | # Removed since target, marked as such in working copy parent
|
3728 | 3727 | (dsremoved, actions[b'undelete'], discard), |
|
3729 | 3728 | # Same as `dsremoved` but an unknown file exists at the same path |
|
3730 | 3729 | (dsremovunk, actions[b'undelete'], check), |
|
3731 | 3730 | ## the following sets do not result in any file changes
|
3732 | 3731 | # File with no modification |
|
3733 | 3732 | (clean, actions[b'noop'], discard), |
|
3734 | 3733 | # Existing file, not tracked anywhere |
|
3735 | 3734 | (unknown, actions[b'unknown'], discard), |
|
3736 | 3735 | ) |
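# Each file is handled by the first row whose set contains it, so the
# ordering above doubles as a priority list (e.g. a locally modified
# file hits `dsmodified` and gets a backup, while an untouched one
# falls through to `clean` and is reported as needing no changes).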
|
3737 | 3736 | |
|
3738 | 3737 | for abs, exact in sorted(names.items()): |
|
3739 | 3738 | # target file to be touched on disk
|
3740 | 3739 | target = repo.wjoin(abs) |
|
3741 | 3740 | # search the entry in the dispatch table. |
|
3742 | 3741 | # if the file is in any of these sets, it was touched in the working |
|
3743 | 3742 | # directory parent and we are sure it needs to be reverted. |
|
3744 | 3743 | for table, (xlist, msg), dobackup in disptable: |
|
3745 | 3744 | if abs not in table: |
|
3746 | 3745 | continue |
|
3747 | 3746 | if xlist is not None: |
|
3748 | 3747 | xlist.append(abs) |
|
3749 | 3748 | if dobackup: |
|
3750 | 3749 | # If in interactive mode, don't automatically create |
|
3751 | 3750 | # .orig files (issue4793) |
|
3752 | 3751 | if dobackup == backupinteractive: |
|
3753 | 3752 | tobackup.add(abs) |
|
3754 | 3753 | elif backup <= dobackup or wctx[abs].cmp(ctx[abs]): |
|
3755 | 3754 | absbakname = scmutil.backuppath(ui, repo, abs) |
|
3756 | 3755 | bakname = os.path.relpath( |
|
3757 | 3756 | absbakname, start=repo.root |
|
3758 | 3757 | ) |
|
3759 | 3758 | ui.note( |
|
3760 | 3759 | _(b'saving current version of %s as %s\n') |
|
3761 | 3760 | % (uipathfn(abs), uipathfn(bakname)) |
|
3762 | 3761 | ) |
|
3763 | 3762 | if not opts.get(b'dry_run'): |
|
3764 | 3763 | if interactive: |
|
3765 | 3764 | util.copyfile(target, absbakname) |
|
3766 | 3765 | else: |
|
3767 | 3766 | util.rename(target, absbakname) |
|
3768 | 3767 | if opts.get(b'dry_run'): |
|
3769 | 3768 | if ui.verbose or not exact: |
|
3770 | 3769 | ui.status(msg % uipathfn(abs)) |
|
3771 | 3770 | elif exact: |
|
3772 | 3771 | ui.warn(msg % uipathfn(abs)) |
|
3773 | 3772 | break |
|
3774 | 3773 | |
|
3775 | 3774 | if not opts.get(b'dry_run'): |
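# Prefetch the contents of every file that an action below will write
# back to disk, so stores that keep file data out of the local store
# (e.g. lfs) can batch their downloads.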
|
3776 | 3775 | needdata = (b'revert', b'add', b'undelete') |
|
3777 | 3776 | oplist = [actions[name][0] for name in needdata] |
|
3778 | 3777 | prefetch = scmutil.prefetchfiles |
|
3779 | 3778 | matchfiles = scmutil.matchfiles( |
|
3780 | 3779 | repo, [f for sublist in oplist for f in sublist] |
|
3781 | 3780 | ) |
|
3782 | 3781 | prefetch( |
|
3783 | 3782 | repo, [(ctx.rev(), matchfiles)], |
|
3784 | 3783 | ) |
|
3785 | 3784 | match = scmutil.match(repo[None], pats) |
|
3786 | 3785 | _performrevert( |
|
3787 | 3786 | repo, |
|
3788 | 3787 | ctx, |
|
3789 | 3788 | names, |
|
3790 | 3789 | uipathfn, |
|
3791 | 3790 | actions, |
|
3792 | 3791 | match, |
|
3793 | 3792 | interactive, |
|
3794 | 3793 | tobackup, |
|
3795 | 3794 | ) |
|
3796 | 3795 | |
|
3797 | 3796 | if targetsubs: |
|
3798 | 3797 | # Revert the subrepos on the revert list |
|
3799 | 3798 | for sub in targetsubs: |
|
3800 | 3799 | try: |
|
3801 | 3800 | wctx.sub(sub).revert( |
|
3802 | 3801 | ctx.substate[sub], *pats, **pycompat.strkwargs(opts) |
|
3803 | 3802 | ) |
|
3804 | 3803 | except KeyError: |
|
3805 | 3804 | raise error.Abort( |
|
3806 | 3805 | b"subrepository '%s' does not exist in %s!" |
|
3807 | 3806 | % (sub, short(ctx.node())) |
|
3808 | 3807 | ) |
|
3809 | 3808 | |
|
3810 | 3809 | |
|
3811 | 3810 | def _performrevert( |
|
3812 | 3811 | repo, |
|
3813 | 3812 | ctx, |
|
3814 | 3813 | names, |
|
3815 | 3814 | uipathfn, |
|
3816 | 3815 | actions, |
|
3817 | 3816 | match, |
|
3818 | 3817 | interactive=False, |
|
3819 | 3818 | tobackup=None, |
|
3820 | 3819 | ): |
|
3821 | 3820 | """function that actually perform all the actions computed for revert |
|
3822 | 3821 | |
|
3823 | 3822 | This is an independent function to let extensions plug in and react to
|
3824 | 3823 | the imminent revert. |
|
3825 | 3824 | |
|
3826 | 3825 | Make sure you have the working directory locked when calling this function. |
|
3827 | 3826 | """ |
|
3828 | 3827 | parent, p2 = repo.dirstate.parents() |
|
3829 | 3828 | node = ctx.node() |
|
3830 | 3829 | excluded_files = [] |
|
3831 | 3830 | |
|
3832 | 3831 | def checkout(f): |
|
3833 | 3832 | fc = ctx[f] |
|
3834 | 3833 | repo.wwrite(f, fc.data(), fc.flags()) |
|
3835 | 3834 | |
|
3836 | 3835 | def doremove(f): |
|
3837 | 3836 | try: |
|
3838 | 3837 | rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs') |
|
3839 | 3838 | repo.wvfs.unlinkpath(f, rmdir=rmdir) |
|
3840 | 3839 | except OSError: |
|
3841 | 3840 | pass |
|
3842 | 3841 | repo.dirstate.remove(f) |
|
3843 | 3842 | |
|
3844 | 3843 | def prntstatusmsg(action, f): |
|
3845 | 3844 | exact = names[f] |
|
3846 | 3845 | if repo.ui.verbose or not exact: |
|
3847 | 3846 | repo.ui.status(actions[action][1] % uipathfn(f)) |
|
3848 | 3847 | |
|
3849 | 3848 | audit_path = pathutil.pathauditor(repo.root, cached=True) |
|
3850 | 3849 | for f in actions[b'forget'][0]: |
|
3851 | 3850 | if interactive: |
|
3852 | 3851 | choice = repo.ui.promptchoice( |
|
3853 | 3852 | _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f) |
|
3854 | 3853 | ) |
|
3855 | 3854 | if choice == 0: |
|
3856 | 3855 | prntstatusmsg(b'forget', f) |
|
3857 | 3856 | repo.dirstate.drop(f) |
|
3858 | 3857 | else: |
|
3859 | 3858 | excluded_files.append(f) |
|
3860 | 3859 | else: |
|
3861 | 3860 | prntstatusmsg(b'forget', f) |
|
3862 | 3861 | repo.dirstate.drop(f) |
|
3863 | 3862 | for f in actions[b'remove'][0]: |
|
3864 | 3863 | audit_path(f) |
|
3865 | 3864 | if interactive: |
|
3866 | 3865 | choice = repo.ui.promptchoice( |
|
3867 | 3866 | _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f) |
|
3868 | 3867 | ) |
|
3869 | 3868 | if choice == 0: |
|
3870 | 3869 | prntstatusmsg(b'remove', f) |
|
3871 | 3870 | doremove(f) |
|
3872 | 3871 | else: |
|
3873 | 3872 | excluded_files.append(f) |
|
3874 | 3873 | else: |
|
3875 | 3874 | prntstatusmsg(b'remove', f) |
|
3876 | 3875 | doremove(f) |
|
3877 | 3876 | for f in actions[b'drop'][0]: |
|
3878 | 3877 | audit_path(f) |
|
3879 | 3878 | prntstatusmsg(b'drop', f) |
|
3880 | 3879 | repo.dirstate.remove(f) |
|
3881 | 3880 | |
|
3882 | 3881 | normal = None |
|
3883 | 3882 | if node == parent: |
|
3884 | 3883 | # We're reverting to our parent. If possible, we'd like status |
|
3885 | 3884 | # to report the file as clean. We have to use normallookup for |
|
3886 | 3885 | # merges to avoid losing information about merged/dirty files. |
|
3887 | 3886 | if p2 != nullid: |
|
3888 | 3887 | normal = repo.dirstate.normallookup |
|
3889 | 3888 | else: |
|
3890 | 3889 | normal = repo.dirstate.normal |
|
3891 | 3890 | |
|
3892 | 3891 | newlyaddedandmodifiedfiles = set() |
|
3893 | 3892 | if interactive: |
|
3894 | 3893 | # Prompt the user for changes to revert |
|
3895 | 3894 | torevert = [f for f in actions[b'revert'][0] if f not in excluded_files] |
|
3896 | 3895 | m = scmutil.matchfiles(repo, torevert) |
|
3897 | 3896 | diffopts = patch.difffeatureopts( |
|
3898 | 3897 | repo.ui, |
|
3899 | 3898 | whitespace=True, |
|
3900 | 3899 | section=b'commands', |
|
3901 | 3900 | configprefix=b'revert.interactive.', |
|
3902 | 3901 | ) |
|
3903 | 3902 | diffopts.nodates = True |
|
3904 | 3903 | diffopts.git = True |
|
3905 | 3904 | operation = b'apply' |
|
3906 | 3905 | if node == parent: |
|
3907 | 3906 | if repo.ui.configbool( |
|
3908 | 3907 | b'experimental', b'revert.interactive.select-to-keep' |
|
3909 | 3908 | ): |
|
3910 | 3909 | operation = b'keep' |
|
3911 | 3910 | else: |
|
3912 | 3911 | operation = b'discard' |
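# 'discard' asks which local changes to throw away (the selected
# hunks are reversed below before being applied), 'apply' asks which
# changes from the target revision to bring back, and the
# experimental 'keep' mode inverts the prompt to select hunks to
# retain.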
|
3913 | 3912 | |
|
3914 | 3913 | if operation == b'apply': |
|
3915 | 3914 | diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts) |
|
3916 | 3915 | else: |
|
3917 | 3916 | diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts) |
|
3918 | 3917 | originalchunks = patch.parsepatch(diff) |
|
3919 | 3918 | |
|
3920 | 3919 | try: |
|
3921 | 3920 | |
|
3922 | 3921 | chunks, opts = recordfilter( |
|
3923 | 3922 | repo.ui, originalchunks, match, operation=operation |
|
3924 | 3923 | ) |
|
3925 | 3924 | if operation == b'discard': |
|
3926 | 3925 | chunks = patch.reversehunks(chunks) |
|
3927 | 3926 | |
|
3928 | 3927 | except error.PatchError as err: |
|
3929 | 3928 | raise error.Abort(_(b'error parsing patch: %s') % err) |
|
3930 | 3929 | |
|
3931 | 3930 | # FIXME: when doing an interactive revert of a copy, there's no way of |
|
3932 | 3931 | # performing a partial revert of the added file, the only option is |
|
3933 | 3932 | # "remove added file <name> (Yn)?", so we don't need to worry about the |
|
3934 | 3933 | # alsorestore value. Ideally we'd be able to partially revert |
|
3935 | 3934 | # copied/renamed files. |
|
3936 | 3935 | newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified( |
|
3937 | 3936 | chunks, originalchunks |
|
3938 | 3937 | ) |
|
3939 | 3938 | if tobackup is None: |
|
3940 | 3939 | tobackup = set() |
|
3941 | 3940 | # Apply changes |
|
3942 | 3941 | fp = stringio() |
|
3943 | 3942 | # chunks are serialized per file, but files aren't sorted |
|
3944 | 3943 | for f in sorted({c.header.filename() for c in chunks if ishunk(c)}): |
|
3945 | 3944 | prntstatusmsg(b'revert', f) |
|
3946 | 3945 | files = set() |
|
3947 | 3946 | for c in chunks: |
|
3948 | 3947 | if ishunk(c): |
|
3949 | 3948 | abs = c.header.filename() |
|
3950 | 3949 | # Create a backup file only if this hunk should be backed up |
|
3951 | 3950 | if c.header.filename() in tobackup: |
|
3952 | 3951 | target = repo.wjoin(abs) |
|
3953 | 3952 | bakname = scmutil.backuppath(repo.ui, repo, abs) |
|
3954 | 3953 | util.copyfile(target, bakname) |
|
3955 | 3954 | tobackup.remove(abs) |
|
3956 | 3955 | if abs not in files: |
|
3957 | 3956 | files.add(abs) |
|
3958 | 3957 | if operation == b'keep': |
|
3959 | 3958 | checkout(abs) |
|
3960 | 3959 | c.write(fp) |
|
3961 | 3960 | dopatch = fp.tell() |
|
3962 | 3961 | fp.seek(0) |
|
3963 | 3962 | if dopatch: |
|
3964 | 3963 | try: |
|
3965 | 3964 | patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None) |
|
3966 | 3965 | except error.PatchError as err: |
|
3967 | 3966 | raise error.Abort(pycompat.bytestr(err)) |
|
3968 | 3967 | del fp |
|
3969 | 3968 | else: |
|
3970 | 3969 | for f in actions[b'revert'][0]: |
|
3971 | 3970 | prntstatusmsg(b'revert', f) |
|
3972 | 3971 | checkout(f) |
|
3973 | 3972 | if normal: |
|
3974 | 3973 | normal(f) |
|
3975 | 3974 | |
|
3976 | 3975 | for f in actions[b'add'][0]: |
|
3977 | 3976 | # Don't checkout modified files, they are already created by the diff |
|
3978 | 3977 | if f not in newlyaddedandmodifiedfiles: |
|
3979 | 3978 | prntstatusmsg(b'add', f) |
|
3980 | 3979 | checkout(f) |
|
3981 | 3980 | repo.dirstate.add(f) |
|
3982 | 3981 | |
|
3983 | 3982 | normal = repo.dirstate.normallookup |
|
3984 | 3983 | if node == parent and p2 == nullid: |
|
3985 | 3984 | normal = repo.dirstate.normal |
|
3986 | 3985 | for f in actions[b'undelete'][0]: |
|
3987 | 3986 | if interactive: |
|
3988 | 3987 | choice = repo.ui.promptchoice( |
|
3989 | 3988 | _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f |
|
3990 | 3989 | ) |
|
3991 | 3990 | if choice == 0: |
|
3992 | 3991 | prntstatusmsg(b'undelete', f) |
|
3993 | 3992 | checkout(f) |
|
3994 | 3993 | normal(f) |
|
3995 | 3994 | else: |
|
3996 | 3995 | excluded_files.append(f) |
|
3997 | 3996 | else: |
|
3998 | 3997 | prntstatusmsg(b'undelete', f) |
|
3999 | 3998 | checkout(f) |
|
4000 | 3999 | normal(f) |
|
4001 | 4000 | |
|
4002 | 4001 | copied = copies.pathcopies(repo[parent], ctx) |
|
4003 | 4002 | |
|
4004 | 4003 | for f in ( |
|
4005 | 4004 | actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0] |
|
4006 | 4005 | ): |
|
4007 | 4006 | if f in copied: |
|
4008 | 4007 | repo.dirstate.copy(copied[f], f) |
|
4009 | 4008 | |
|
4010 | 4009 | |
|
4011 | 4010 | # a list of (ui, repo, otherpeer, opts, missing) functions called by |
|
4012 | 4011 | # commands.outgoing. "missing" is the "missing" attribute of the result of
|
4013 | 4012 | # "findcommonoutgoing()" |
|
4014 | 4013 | outgoinghooks = util.hooks() |
|
4015 | 4014 | |
|
4016 | 4015 | # a list of (ui, repo) functions called by commands.summary |
|
4017 | 4016 | summaryhooks = util.hooks() |
|
4018 | 4017 | |
|
4019 | 4018 | # a list of (ui, repo, opts, changes) functions called by commands.summary. |
|
4020 | 4019 | # |
|
4021 | 4020 | # functions should return a tuple of booleans below, if 'changes' is None:
|
4022 | 4021 | # (whether-incomings-are-needed, whether-outgoings-are-needed) |
|
4023 | 4022 | # |
|
4024 | 4023 | # otherwise, 'changes' is a tuple of tuples below: |
|
4025 | 4024 | # - (sourceurl, sourcebranch, sourcepeer, incoming) |
|
4026 | 4025 | # - (desturl, destbranch, destpeer, outgoing) |
|
4027 | 4026 | summaryremotehooks = util.hooks() |
|
4028 | 4027 | |
|
4029 | 4028 | |
|
4030 | 4029 | def checkunfinished(repo, commit=False, skipmerge=False): |
|
4031 | 4030 | '''Look for an unfinished multistep operation, like graft, and abort |
|
4032 | 4031 | if found. It's probably good to check this right before |
|
4033 | 4032 | bailifchanged(). |
|
4034 | 4033 | ''' |
|
4035 | 4034 | # Check for non-clearable states first, so things like rebase will take |
|
4036 | 4035 | # precedence over update. |
|
4037 | 4036 | for state in statemod._unfinishedstates: |
|
4038 | 4037 | if ( |
|
4039 | 4038 | state._clearable |
|
4040 | 4039 | or (commit and state._allowcommit) |
|
4041 | 4040 | or state._reportonly |
|
4042 | 4041 | ): |
|
4043 | 4042 | continue |
|
4044 | 4043 | if state.isunfinished(repo): |
|
4045 | 4044 | raise error.Abort(state.msg(), hint=state.hint()) |
|
4046 | 4045 | |
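# Then the clearable states, which abort as well unless the calling
# command explicitly tolerates them (e.g. commit with _allowcommit,
# or skipmerge for merges); clearunfinished() below is what actually
# wipes the clearable ones.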
|
4047 | 4046 | for s in statemod._unfinishedstates: |
|
4048 | 4047 | if ( |
|
4049 | 4048 | not s._clearable |
|
4050 | 4049 | or (commit and s._allowcommit) |
|
4051 | 4050 | or (s._opname == b'merge' and skipmerge) |
|
4052 | 4051 | or s._reportonly |
|
4053 | 4052 | ): |
|
4054 | 4053 | continue |
|
4055 | 4054 | if s.isunfinished(repo): |
|
4056 | 4055 | raise error.Abort(s.msg(), hint=s.hint()) |
|
4057 | 4056 | |
|
4058 | 4057 | |
|
4059 | 4058 | def clearunfinished(repo): |
|
4060 | 4059 | '''Check for unfinished operations (as above), and clear the ones |
|
4061 | 4060 | that are clearable. |
|
4062 | 4061 | ''' |
|
4063 | 4062 | for state in statemod._unfinishedstates: |
|
4064 | 4063 | if state._reportonly: |
|
4065 | 4064 | continue |
|
4066 | 4065 | if not state._clearable and state.isunfinished(repo): |
|
4067 | 4066 | raise error.Abort(state.msg(), hint=state.hint()) |
|
4068 | 4067 | |
|
4069 | 4068 | for s in statemod._unfinishedstates: |
|
4070 | 4069 | if s._opname == b'merge' or s._reportonly:
|
4071 | 4070 | continue |
|
4072 | 4071 | if s._clearable and s.isunfinished(repo): |
|
4073 | 4072 | util.unlink(repo.vfs.join(s._fname)) |
|
4074 | 4073 | |
|
4075 | 4074 | |
|
4076 | 4075 | def getunfinishedstate(repo): |
|
4077 | 4076 | '''Check for unfinished operations and return the statecheck object
|
4078 | 4077 | for it''' |
|
4079 | 4078 | for state in statemod._unfinishedstates: |
|
4080 | 4079 | if state.isunfinished(repo): |
|
4081 | 4080 | return state |
|
4082 | 4081 | return None |
|
4083 | 4082 | |
|
4084 | 4083 | |
|
4085 | 4084 | def howtocontinue(repo): |
|
4086 | 4085 | '''Check for an unfinished operation and return the command to finish |
|
4087 | 4086 | it. |
|
4088 | 4087 | |
|
4089 | 4088 | statemod._unfinishedstates list is checked for an unfinished operation |
|
4090 | 4089 | and the corresponding message to finish it is generated if a method to |
|
4091 | 4090 | continue is supported by the operation. |
|
4092 | 4091 | |
|
4093 | 4092 | Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is |
|
4094 | 4093 | a boolean. |
|
4095 | 4094 | ''' |
|
4096 | 4095 | contmsg = _(b"continue: %s") |
|
4097 | 4096 | for state in statemod._unfinishedstates: |
|
4098 | 4097 | if not state._continueflag: |
|
4099 | 4098 | continue |
|
4100 | 4099 | if state.isunfinished(repo): |
|
4101 | 4100 | return contmsg % state.continuemsg(), True |
|
4102 | 4101 | if repo[None].dirty(missing=True, merge=False, branch=False): |
|
4103 | 4102 | return contmsg % _(b"hg commit"), False |
|
4104 | 4103 | return None, None |
|
4105 | 4104 | |
|
4106 | 4105 | |
|
4107 | 4106 | def checkafterresolved(repo): |
|
4108 | 4107 | '''Inform the user about the next action after completing hg resolve |
|
4109 | 4108 | |
|
4110 | 4109 | If there's an unfinished operation that supports the continue flag,

4111 | 4110 | the message is reported with repo.ui.warn.

4112 | 4111 | 

4113 | 4112 | Otherwise, it is reported with repo.ui.note.
|
4114 | 4113 | ''' |
|
4115 | 4114 | msg, warning = howtocontinue(repo) |
|
4116 | 4115 | if msg is not None: |
|
4117 | 4116 | if warning: |
|
4118 | 4117 | repo.ui.warn(b"%s\n" % msg) |
|
4119 | 4118 | else: |
|
4120 | 4119 | repo.ui.note(b"%s\n" % msg) |
|
4121 | 4120 | |
|
4122 | 4121 | |
|
4123 | 4122 | def wrongtooltocontinue(repo, task): |
|
4124 | 4123 | '''Raise an abort suggesting how to properly continue if there is an |
|
4125 | 4124 | active task. |
|
4126 | 4125 | |
|
4127 | 4126 | Uses howtocontinue() to find the active task. |
|
4128 | 4127 | |
|
4129 | 4128 | If there's no active task (only the generic 'hg commit' fallback), it

4130 | 4129 | does not offer a hint.
|
4131 | 4130 | ''' |
|
4132 | 4131 | after = howtocontinue(repo) |
|
4133 | 4132 | hint = None |
|
4134 | 4133 | if after[1]: |
|
4135 | 4134 | hint = after[0] |
|
4136 | 4135 | raise error.Abort(_(b'no %s in progress') % task, hint=hint) |
|
4137 | 4136 | |
|
4138 | 4137 | |
|
4139 | 4138 | def abortgraft(ui, repo, graftstate): |
|
4140 | 4139 | """abort the interrupted graft and rollbacks to the state before interrupted |
|
4141 | 4140 | graft""" |
|
4142 | 4141 | if not graftstate.exists(): |
|
4143 | 4142 | raise error.Abort(_(b"no interrupted graft to abort")) |
|
4144 | 4143 | statedata = readgraftstate(repo, graftstate) |
|
4145 | 4144 | newnodes = statedata.get(b'newnodes') |
|
4146 | 4145 | if newnodes is None: |
|
4147 | 4146 | # an old graft state which does not have all the data required to abort
|
4148 | 4147 | # the graft |
|
4149 | 4148 | raise error.Abort(_(b"cannot abort using an old graftstate")) |
|
4150 | 4149 | |
|
4151 | 4150 | # changeset from which graft operation was started |
|
4152 | 4151 | if len(newnodes) > 0: |
|
4153 | 4152 | startctx = repo[newnodes[0]].p1() |
|
4154 | 4153 | else: |
|
4155 | 4154 | startctx = repo[b'.'] |
|
4156 | 4155 | # whether to strip or not |
|
4157 | 4156 | cleanup = False |
|
4158 | 4157 | from . import hg |
|
4159 | 4158 | |
|
4160 | 4159 | if newnodes: |
|
4161 | 4160 | newnodes = [repo[r].rev() for r in newnodes] |
|
4162 | 4161 | cleanup = True |
|
4163 | 4162 | # check that none of the newnodes has turned public or is already public
|
4164 | 4163 | immutable = [c for c in newnodes if not repo[c].mutable()] |
|
4165 | 4164 | if immutable: |
|
4166 | 4165 | repo.ui.warn( |
|
4167 | 4166 | _(b"cannot clean up public changesets %s\n") |
|
4168 | 4167 | % b', '.join(bytes(repo[r]) for r in immutable), |
|
4169 | 4168 | hint=_(b"see 'hg help phases' for details"), |
|
4170 | 4169 | ) |
|
4171 | 4170 | cleanup = False |
|
4172 | 4171 | |
|
4173 | 4172 | # checking that no new nodes are created on top of grafted revs |
|
4174 | 4173 | desc = set(repo.changelog.descendants(newnodes)) |
|
4175 | 4174 | if desc - set(newnodes): |
|
4176 | 4175 | repo.ui.warn( |
|
4177 | 4176 | _( |
|
4178 | 4177 | b"new changesets detected on destination " |
|
4179 | 4178 | b"branch, can't strip\n" |
|
4180 | 4179 | ) |
|
4181 | 4180 | ) |
|
4182 | 4181 | cleanup = False |
|
4183 | 4182 | |
|
4184 | 4183 | if cleanup: |
|
4185 | 4184 | with repo.wlock(), repo.lock(): |
|
4186 | 4185 | hg.updaterepo(repo, startctx.node(), overwrite=True) |
|
4187 | 4186 | # stripping the new nodes created |
|
4188 | 4187 | strippoints = [ |
|
4189 | 4188 | c.node() for c in repo.set(b"roots(%ld)", newnodes) |
|
4190 | 4189 | ] |
|
4191 | 4190 | repair.strip(repo.ui, repo, strippoints, backup=False) |
|
4192 | 4191 | |
|
4193 | 4192 | if not cleanup: |
|
4194 | 4193 | # we don't update to the startnode if we can't strip |
|
4195 | 4194 | startctx = repo[b'.'] |
|
4196 | 4195 | hg.updaterepo(repo, startctx.node(), overwrite=True) |
|
4197 | 4196 | |
|
4198 | 4197 | ui.status(_(b"graft aborted\n")) |
|
4199 | 4198 | ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12]) |
|
4200 | 4199 | graftstate.delete() |
|
4201 | 4200 | return 0 |
|
4202 | 4201 | |
|
4203 | 4202 | |
|
4204 | 4203 | def readgraftstate(repo, graftstate): |
|
4205 | 4204 | # type: (Any, statemod.cmdstate) -> Dict[bytes, Any] |
|
4206 | 4205 | """read the graft state file and return a dict of the data stored in it""" |
|
4207 | 4206 | try: |
|
4208 | 4207 | return graftstate.read() |
|
4209 | 4208 | except error.CorruptedState: |
|
4210 | 4209 | nodes = repo.vfs.read(b'graftstate').splitlines() |
|
4211 | 4210 | return {b'nodes': nodes} |
|
4212 | 4211 | |
|
4213 | 4212 | |
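# Editor's note — a sketch of the two shapes readgraftstate() can return,
# per the fallback above (node values are hypothetical):
#   {b'newnodes': [...], b'nodes': [...], ...}    # modern graftstate file
#   {b'nodes': [b'<hex-node>', ...]}              # legacy fallback on CorruptedState
# abortgraft() above aborts when b'newnodes' is absent from the data.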
|
4214 | 4213 | def hgabortgraft(ui, repo): |
|
4215 | 4214 | """ abort logic for aborting graft using 'hg abort'""" |
|
4216 | 4215 | with repo.wlock(): |
|
4217 | 4216 | graftstate = statemod.cmdstate(repo, b'graftstate') |
|
4218 | 4217 | return abortgraft(ui, repo, graftstate) |
@@ -1,3157 +1,3157 | |||
|
1 | 1 | # exchange.py - utility to exchange data between repos. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import weakref |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from .node import ( |
|
15 | 15 | hex, |
|
16 | 16 | nullid, |
|
17 | 17 | nullrev, |
|
18 | 18 | ) |
|
19 | 19 | from .thirdparty import attr |
|
20 | 20 | from . import ( |
|
21 | 21 | bookmarks as bookmod, |
|
22 | 22 | bundle2, |
|
23 | 23 | changegroup, |
|
24 | 24 | discovery, |
|
25 | 25 | error, |
|
26 | 26 | exchangev2, |
|
27 | 27 | lock as lockmod, |
|
28 | 28 | logexchange, |
|
29 | 29 | narrowspec, |
|
30 | 30 | obsolete, |
|
31 | 31 | obsutil, |
|
32 | 32 | phases, |
|
33 | 33 | pushkey, |
|
34 | 34 | pycompat, |
|
35 | 35 | requirements, |
|
36 | 36 | scmutil, |
|
37 | 37 | sslutil, |
|
38 | 38 | streamclone, |
|
39 | 39 | url as urlmod, |
|
40 | 40 | util, |
|
41 | 41 | wireprototypes, |
|
42 | 42 | ) |
|
43 | 43 | from .utils import ( |
|
44 | 44 | hashutil, |
|
45 | 45 | stringutil, |
|
46 | 46 | ) |
|
47 | 47 | |
|
48 | 48 | urlerr = util.urlerr |
|
49 | 49 | urlreq = util.urlreq |
|
50 | 50 | |
|
51 | 51 | _NARROWACL_SECTION = b'narrowacl' |
|
52 | 52 | |
|
53 | 53 | # Maps bundle version human names to changegroup versions. |
|
54 | 54 | _bundlespeccgversions = { |
|
55 | 55 | b'v1': b'01', |
|
56 | 56 | b'v2': b'02', |
|
57 | 57 | b'packed1': b's1', |
|
58 | 58 | b'bundle2': b'02', # legacy |
|
59 | 59 | } |
|
60 | 60 | |
|
61 | 61 | # Maps bundle version with content opts to choose which part to bundle |
|
62 | 62 | _bundlespeccontentopts = { |
|
63 | 63 | b'v1': { |
|
64 | 64 | b'changegroup': True, |
|
65 | 65 | b'cg.version': b'01', |
|
66 | 66 | b'obsolescence': False, |
|
67 | 67 | b'phases': False, |
|
68 | 68 | b'tagsfnodescache': False, |
|
69 | 69 | b'revbranchcache': False, |
|
70 | 70 | }, |
|
71 | 71 | b'v2': { |
|
72 | 72 | b'changegroup': True, |
|
73 | 73 | b'cg.version': b'02', |
|
74 | 74 | b'obsolescence': False, |
|
75 | 75 | b'phases': False, |
|
76 | 76 | b'tagsfnodescache': True, |
|
77 | 77 | b'revbranchcache': True, |
|
78 | 78 | }, |
|
79 | 79 | b'packed1': {b'cg.version': b's1'}, |
|
80 | 80 | } |
|
81 | 81 | _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2'] |
|
82 | 82 | |
|
83 | 83 | _bundlespecvariants = { |
|
84 | 84 | b"streamv2": { |
|
85 | 85 | b"changegroup": False, |
|
86 | 86 | b"streamv2": True, |
|
87 | 87 | b"tagsfnodescache": False, |
|
88 | 88 | b"revbranchcache": False, |
|
89 | 89 | } |
|
90 | 90 | } |
|
91 | 91 | |
|
92 | 92 | # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE. |
|
93 | 93 | _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'} |
|
94 | 94 | |
|
95 | 95 | |
|
96 | 96 | @attr.s |
|
97 | 97 | class bundlespec(object): |
|
98 | 98 | compression = attr.ib() |
|
99 | 99 | wirecompression = attr.ib() |
|
100 | 100 | version = attr.ib() |
|
101 | 101 | wireversion = attr.ib() |
|
102 | 102 | params = attr.ib() |
|
103 | 103 | contentopts = attr.ib() |
|
104 | 104 | |
|
105 | 105 | |
|
106 | 106 | def parsebundlespec(repo, spec, strict=True): |
|
107 | 107 | """Parse a bundle string specification into parts. |
|
108 | 108 | |
|
109 | 109 | Bundle specifications denote a well-defined bundle/exchange format. |
|
110 | 110 | The content of a given specification should not change over time in |
|
111 | 111 | order to ensure that bundles produced by a newer version of Mercurial are |
|
112 | 112 | readable from an older version. |
|
113 | 113 | |
|
114 | 114 | The string currently has the form: |
|
115 | 115 | |
|
116 | 116 | <compression>-<type>[;<parameter0>[;<parameter1>]] |
|
117 | 117 | |
|
118 | 118 | Where <compression> is one of the supported compression formats |
|
119 | 119 | and <type> is (currently) a version string. A ";" can follow the type and |
|
120 | 120 | all text afterwards is interpreted as URI encoded, ";" delimited key=value |
|
121 | 121 | pairs. |
|
122 | 122 | |
|
123 | 123 | If ``strict`` is True (the default) <compression> is required. Otherwise, |
|
124 | 124 | it is optional. |
|
125 | 125 | |
|
126 | 126 | Returns a bundlespec object of (compression, version, parameters). |
|
127 | 127 | Compression will be ``None`` if not in strict mode and a compression isn't |
|
128 | 128 | defined. |
|
129 | 129 | |
|
130 | 130 | An ``InvalidBundleSpecification`` is raised when the specification is |
|
131 | 131 | not syntactically well formed. |
|
132 | 132 | |
|
133 | 133 | An ``UnsupportedBundleSpecification`` is raised when the compression or |
|
134 | 134 | bundle type/version is not recognized. |
|
135 | 135 | |
|
136 | 136 | Note: this function will likely eventually return a more complex data |
|
137 | 137 | structure, including bundle2 part information. |
|
138 | 138 | """ |
|
139 | 139 | |
|
140 | 140 | def parseparams(s): |
|
141 | 141 | if b';' not in s: |
|
142 | 142 | return s, {} |
|
143 | 143 | |
|
144 | 144 | params = {} |
|
145 | 145 | version, paramstr = s.split(b';', 1) |
|
146 | 146 | |
|
147 | 147 | for p in paramstr.split(b';'): |
|
148 | 148 | if b'=' not in p: |
|
149 | 149 | raise error.InvalidBundleSpecification( |
|
150 | 150 | _( |
|
151 | 151 | b'invalid bundle specification: ' |
|
152 | 152 | b'missing "=" in parameter: %s' |
|
153 | 153 | ) |
|
154 | 154 | % p |
|
155 | 155 | ) |
|
156 | 156 | |
|
157 | 157 | key, value = p.split(b'=', 1) |
|
158 | 158 | key = urlreq.unquote(key) |
|
159 | 159 | value = urlreq.unquote(value) |
|
160 | 160 | params[key] = value |
|
161 | 161 | |
|
162 | 162 | return version, params |
|
163 | 163 | |
|
164 | 164 | if strict and b'-' not in spec: |
|
165 | 165 | raise error.InvalidBundleSpecification( |
|
166 | 166 | _( |
|
167 | 167 | b'invalid bundle specification; ' |
|
168 | 168 | b'must be prefixed with compression: %s' |
|
169 | 169 | ) |
|
170 | 170 | % spec |
|
171 | 171 | ) |
|
172 | 172 | |
|
173 | 173 | if b'-' in spec: |
|
174 | 174 | compression, version = spec.split(b'-', 1) |
|
175 | 175 | |
|
176 | 176 | if compression not in util.compengines.supportedbundlenames: |
|
177 | 177 | raise error.UnsupportedBundleSpecification( |
|
178 | 178 | _(b'%s compression is not supported') % compression |
|
179 | 179 | ) |
|
180 | 180 | |
|
181 | 181 | version, params = parseparams(version) |
|
182 | 182 | |
|
183 | 183 | if version not in _bundlespeccgversions: |
|
184 | 184 | raise error.UnsupportedBundleSpecification( |
|
185 | 185 | _(b'%s is not a recognized bundle version') % version |
|
186 | 186 | ) |
|
187 | 187 | else: |
|
188 | 188 | # Value could be just the compression or just the version, in which |
|
189 | 189 | # case some defaults are assumed (but only when not in strict mode). |
|
190 | 190 | assert not strict |
|
191 | 191 | |
|
192 | 192 | spec, params = parseparams(spec) |
|
193 | 193 | |
|
194 | 194 | if spec in util.compengines.supportedbundlenames: |
|
195 | 195 | compression = spec |
|
196 | 196 | version = b'v1' |
|
197 | 197 | # Generaldelta repos require v2. |
|
198 | 198 | if b'generaldelta' in repo.requirements: |
|
199 | 199 | version = b'v2' |
|
200 | 200 | # Modern compression engines require v2. |
|
201 | 201 | if compression not in _bundlespecv1compengines: |
|
202 | 202 | version = b'v2' |
|
203 | 203 | elif spec in _bundlespeccgversions: |
|
204 | 204 | if spec == b'packed1': |
|
205 | 205 | compression = b'none' |
|
206 | 206 | else: |
|
207 | 207 | compression = b'bzip2' |
|
208 | 208 | version = spec |
|
209 | 209 | else: |
|
210 | 210 | raise error.UnsupportedBundleSpecification( |
|
211 | 211 | _(b'%s is not a recognized bundle specification') % spec |
|
212 | 212 | ) |
|
213 | 213 | |
|
214 | 214 | # Bundle version 1 only supports a known set of compression engines. |
|
215 | 215 | if version == b'v1' and compression not in _bundlespecv1compengines: |
|
216 | 216 | raise error.UnsupportedBundleSpecification( |
|
217 | 217 | _(b'compression engine %s is not supported on v1 bundles') |
|
218 | 218 | % compression |
|
219 | 219 | ) |
|
220 | 220 | |
|
221 | 221 | # The specification for packed1 can optionally declare the data formats |
|
222 | 222 | # required to apply it. If we see this metadata, compare against what the |
|
223 | 223 | # repo supports and error if the bundle isn't compatible. |
|
224 | 224 | if version == b'packed1' and b'requirements' in params: |
|
225 | 225 | requirements = set(params[b'requirements'].split(b',')) |
|
226 | 226 | missingreqs = requirements - repo.supportedformats |
|
227 | 227 | if missingreqs: |
|
228 | 228 | raise error.UnsupportedBundleSpecification( |
|
229 | 229 | _(b'missing support for repository features: %s') |
|
230 | 230 | % b', '.join(sorted(missingreqs)) |
|
231 | 231 | ) |
|
232 | 232 | |
|
233 | 233 | # Compute contentopts based on the version |
|
234 | 234 | contentopts = _bundlespeccontentopts.get(version, {}).copy() |
|
235 | 235 | |
|
236 | 236 | # Process the variants |
|
237 | 237 | if b"stream" in params and params[b"stream"] == b"v2": |
|
238 | 238 | variant = _bundlespecvariants[b"streamv2"] |
|
239 | 239 | contentopts.update(variant) |
|
240 | 240 | |
|
241 | 241 | engine = util.compengines.forbundlename(compression) |
|
242 | 242 | compression, wirecompression = engine.bundletype() |
|
243 | 243 | wireversion = _bundlespeccgversions[version] |
|
244 | 244 | |
|
245 | 245 | return bundlespec( |
|
246 | 246 | compression, wirecompression, version, wireversion, params, contentopts |
|
247 | 247 | ) |
|
248 | 248 | |
|
249 | 249 | |
|
250 | 250 | def readbundle(ui, fh, fname, vfs=None): |
|
251 | 251 | header = changegroup.readexactly(fh, 4) |
|
252 | 252 | |
|
253 | 253 | alg = None |
|
254 | 254 | if not fname: |
|
255 | 255 | fname = b"stream" |
|
256 | 256 | if not header.startswith(b'HG') and header.startswith(b'\0'): |
|
257 | 257 | fh = changegroup.headerlessfixup(fh, header) |
|
258 | 258 | header = b"HG10" |
|
259 | 259 | alg = b'UN' |
|
260 | 260 | elif vfs: |
|
261 | 261 | fname = vfs.join(fname) |
|
262 | 262 | |
|
263 | 263 | magic, version = header[0:2], header[2:4] |
|
264 | 264 | |
|
265 | 265 | if magic != b'HG': |
|
266 | 266 | raise error.Abort(_(b'%s: not a Mercurial bundle') % fname) |
|
267 | 267 | if version == b'10': |
|
268 | 268 | if alg is None: |
|
269 | 269 | alg = changegroup.readexactly(fh, 2) |
|
270 | 270 | return changegroup.cg1unpacker(fh, alg) |
|
271 | 271 | elif version.startswith(b'2'): |
|
272 | 272 | return bundle2.getunbundler(ui, fh, magicstring=magic + version) |
|
273 | 273 | elif version == b'S1': |
|
274 | 274 | return streamclone.streamcloneapplier(fh) |
|
275 | 275 | else: |
|
276 | 276 | raise error.Abort( |
|
277 | 277 | _(b'%s: unknown bundle version %s') % (fname, version) |
|
278 | 278 | ) |
|
279 | 279 | |
|
280 | 280 | |
|
281 | 281 | def getbundlespec(ui, fh): |
|
282 | 282 | """Infer the bundlespec from a bundle file handle. |
|
283 | 283 | |
|
284 | 284 | The input file handle is seeked and the original seek position is not |
|
285 | 285 | restored. |
|
286 | 286 | """ |
|
287 | 287 | |
|
288 | 288 | def speccompression(alg): |
|
289 | 289 | try: |
|
290 | 290 | return util.compengines.forbundletype(alg).bundletype()[0] |
|
291 | 291 | except KeyError: |
|
292 | 292 | return None |
|
293 | 293 | |
|
294 | 294 | b = readbundle(ui, fh, None) |
|
295 | 295 | if isinstance(b, changegroup.cg1unpacker): |
|
296 | 296 | alg = b._type |
|
297 | 297 | if alg == b'_truncatedBZ': |
|
298 | 298 | alg = b'BZ' |
|
299 | 299 | comp = speccompression(alg) |
|
300 | 300 | if not comp: |
|
301 | 301 | raise error.Abort(_(b'unknown compression algorithm: %s') % alg) |
|
302 | 302 | return b'%s-v1' % comp |
|
303 | 303 | elif isinstance(b, bundle2.unbundle20): |
|
304 | 304 | if b'Compression' in b.params: |
|
305 | 305 | comp = speccompression(b.params[b'Compression']) |
|
306 | 306 | if not comp: |
|
307 | 307 | raise error.Abort( |
|
308 | 308 | _(b'unknown compression algorithm: %s') % comp |
|
309 | 309 | ) |
|
310 | 310 | else: |
|
311 | 311 | comp = b'none' |
|
312 | 312 | |
|
313 | 313 | version = None |
|
314 | 314 | for part in b.iterparts(): |
|
315 | 315 | if part.type == b'changegroup': |
|
316 | 316 | version = part.params[b'version'] |
|
317 | 317 | if version in (b'01', b'02'): |
|
318 | 318 | version = b'v2' |
|
319 | 319 | else: |
|
320 | 320 | raise error.Abort( |
|
321 | 321 | _( |
|
322 | 322 | b'changegroup version %s does not have ' |
|
323 | 323 | b'a known bundlespec' |
|
324 | 324 | ) |
|
325 | 325 | % version, |
|
326 | 326 | hint=_(b'try upgrading your Mercurial client'), |
|
327 | 327 | ) |
|
328 | 328 | elif part.type == b'stream2' and version is None: |
|
329 | 329 | # A stream2 part requires to be part of a v2 bundle |
|
330 | 330 | requirements = urlreq.unquote(part.params[b'requirements']) |
|
331 | 331 | splitted = requirements.split() |
|
332 | 332 | params = bundle2._formatrequirementsparams(splitted) |
|
333 | 333 | return b'none-v2;stream=v2;%s' % params |
|
334 | 334 | |
|
335 | 335 | if not version: |
|
336 | 336 | raise error.Abort( |
|
337 | 337 | _(b'could not identify changegroup version in bundle') |
|
338 | 338 | ) |
|
339 | 339 | |
|
340 | 340 | return b'%s-%s' % (comp, version) |
|
341 | 341 | elif isinstance(b, streamclone.streamcloneapplier): |
|
342 | 342 | requirements = streamclone.readbundle1header(fh)[2] |
|
343 | 343 | formatted = bundle2._formatrequirementsparams(requirements) |
|
344 | 344 | return b'none-packed1;%s' % formatted |
|
345 | 345 | else: |
|
346 | 346 | raise error.Abort(_(b'unknown bundle type: %s') % b) |
|
347 | 347 | |
|
348 | 348 | |
|
349 | 349 | def _computeoutgoing(repo, heads, common): |
|
350 | 350 | """Computes which revs are outgoing given a set of common |
|
351 | 351 | and a set of heads. |
|
352 | 352 | |
|
353 | 353 | This is a separate function so extensions can have access to |
|
354 | 354 | the logic. |
|
355 | 355 | |
|
356 | 356 | Returns a discovery.outgoing object. |
|
357 | 357 | """ |
|
358 | 358 | cl = repo.changelog |
|
359 | 359 | if common: |
|
360 | 360 | hasnode = cl.hasnode |
|
361 | 361 | common = [n for n in common if hasnode(n)] |
|
362 | 362 | else: |
|
363 | 363 | common = [nullid] |
|
364 | 364 | if not heads: |
|
365 | 365 | heads = cl.heads() |
|
366 | 366 | return discovery.outgoing(repo, common, heads) |
|
367 | 367 | |
|
368 | 368 | |
|
369 | 369 | def _checkpublish(pushop): |
|
370 | 370 | repo = pushop.repo |
|
371 | 371 | ui = repo.ui |
|
372 | 372 | behavior = ui.config(b'experimental', b'auto-publish') |
|
373 | 373 | if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'): |
|
374 | 374 | return |
|
375 | 375 | remotephases = listkeys(pushop.remote, b'phases') |
|
376 | 376 | if not remotephases.get(b'publishing', False): |
|
377 | 377 | return |
|
378 | 378 | |
|
379 | 379 | if pushop.revs is None: |
|
380 | 380 | published = repo.filtered(b'served').revs(b'not public()') |
|
381 | 381 | else: |
|
382 | 382 | published = repo.revs(b'::%ln - public()', pushop.revs) |
|
383 | 383 | if published: |
|
384 | 384 | if behavior == b'warn': |
|
385 | 385 | ui.warn( |
|
386 | 386 | _(b'%i changesets about to be published\n') % len(published) |
|
387 | 387 | ) |
|
388 | 388 | elif behavior == b'confirm': |
|
389 | 389 | if ui.promptchoice( |
|
390 | 390 | _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No') |
|
391 | 391 | % len(published) |
|
392 | 392 | ): |
|
393 | 393 | raise error.Abort(_(b'user quit')) |
|
394 | 394 | elif behavior == b'abort': |
|
395 | 395 | msg = _(b'push would publish %i changesets') % len(published) |
|
396 | 396 | hint = _( |
|
397 | 397 | b"use --publish or adjust 'experimental.auto-publish'" |
|
398 | 398 | b" config" |
|
399 | 399 | ) |
|
400 | 400 | raise error.Abort(msg, hint=hint) |
|
401 | 401 | |
|
402 | 402 | |
|
403 | 403 | def _forcebundle1(op): |
|
404 | 404 | """return true if a pull/push must use bundle1 |
|
405 | 405 | |
|
406 | 406 | This function is used to allow testing of the older bundle version""" |
|
407 | 407 | ui = op.repo.ui |
|
408 | 408 | # The goal is this config is to allow developer to choose the bundle |
|
409 | 409 | # version used during exchanged. This is especially handy during test. |
|
410 | 410 | # Value is a list of bundle version to be picked from, highest version |
|
411 | 411 | # should be used. |
|
412 | 412 | # |
|
413 | 413 | # developer config: devel.legacy.exchange |
|
414 | 414 | exchange = ui.configlist(b'devel', b'legacy.exchange') |
|
415 | 415 | forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange |
|
416 | 416 | return forcebundle1 or not op.remote.capable(b'bundle2') |
|
417 | 417 | |
|
418 | 418 | |
|
419 | 419 | class pushoperation(object): |
|
420 | 420 | """A object that represent a single push operation |
|
421 | 421 | |
|
422 | 422 | Its purpose is to carry push related state and very common operations. |
|
423 | 423 | |
|
424 | 424 | A new pushoperation should be created at the beginning of each push and |
|
425 | 425 | discarded afterward. |
|
426 | 426 | """ |
|
427 | 427 | |
|
428 | 428 | def __init__( |
|
429 | 429 | self, |
|
430 | 430 | repo, |
|
431 | 431 | remote, |
|
432 | 432 | force=False, |
|
433 | 433 | revs=None, |
|
434 | 434 | newbranch=False, |
|
435 | 435 | bookmarks=(), |
|
436 | 436 | publish=False, |
|
437 | 437 | pushvars=None, |
|
438 | 438 | ): |
|
439 | 439 | # repo we push from |
|
440 | 440 | self.repo = repo |
|
441 | 441 | self.ui = repo.ui |
|
442 | 442 | # repo we push to |
|
443 | 443 | self.remote = remote |
|
444 | 444 | # force option provided |
|
445 | 445 | self.force = force |
|
446 | 446 | # revs to be pushed (None is "all") |
|
447 | 447 | self.revs = revs |
|
448 | 448 | # bookmark explicitly pushed |
|
449 | 449 | self.bookmarks = bookmarks |
|
450 | 450 | # allow push of new branch |
|
451 | 451 | self.newbranch = newbranch |
|
452 | 452 | # step already performed |
|
453 | 453 | # (used to check what steps have been already performed through bundle2) |
|
454 | 454 | self.stepsdone = set() |
|
455 | 455 | # Integer version of the changegroup push result |
|
456 | 456 | # - None means nothing to push |
|
457 | 457 | # - 0 means HTTP error |
|
458 | 458 | # - 1 means we pushed and remote head count is unchanged *or* |
|
459 | 459 | # we have outgoing changesets but refused to push |
|
460 | 460 | # - other values as described by addchangegroup() |
|
461 | 461 | self.cgresult = None |
|
462 | 462 | # Boolean value for the bookmark push |
|
463 | 463 | self.bkresult = None |
|
464 | 464 | # discover.outgoing object (contains common and outgoing data) |
|
465 | 465 | self.outgoing = None |
|
466 | 466 | # all remote topological heads before the push |
|
467 | 467 | self.remoteheads = None |
|
468 | 468 | # Details of the remote branch pre and post push |
|
469 | 469 | # |
|
470 | 470 | # mapping: {'branch': ([remoteheads], |
|
471 | 471 | # [newheads], |
|
472 | 472 | # [unsyncedheads], |
|
473 | 473 | # [discardedheads])} |
|
474 | 474 | # - branch: the branch name |
|
475 | 475 | # - remoteheads: the list of remote heads known locally |
|
476 | 476 | # None if the branch is new |
|
477 | 477 | # - newheads: the new remote heads (known locally) with outgoing pushed |
|
478 | 478 | # - unsyncedheads: the list of remote heads unknown locally. |
|
479 | 479 | # - discardedheads: the list of remote heads made obsolete by the push |
|
480 | 480 | self.pushbranchmap = None |
|
481 | 481 | # testable as a boolean indicating if any nodes are missing locally. |
|
482 | 482 | self.incoming = None |
|
483 | 483 | # summary of the remote phase situation |
|
484 | 484 | self.remotephases = None |
|
485 | 485 | # phases changes that must be pushed along side the changesets |
|
486 | 486 | self.outdatedphases = None |
|
487 | 487 | # phases changes that must be pushed if changeset push fails |
|
488 | 488 | self.fallbackoutdatedphases = None |
|
489 | 489 | # outgoing obsmarkers |
|
490 | 490 | self.outobsmarkers = set() |
|
491 | 491 | # outgoing bookmarks, list of (bm, oldnode | '', newnode | '') |
|
492 | 492 | self.outbookmarks = [] |
|
493 | 493 | # transaction manager |
|
494 | 494 | self.trmanager = None |
|
495 | 495 | # map { pushkey partid -> callback handling failure} |
|
496 | 496 | # used to handle exception from mandatory pushkey part failure |
|
497 | 497 | self.pkfailcb = {} |
|
498 | 498 | # an iterable of pushvars or None |
|
499 | 499 | self.pushvars = pushvars |
|
500 | 500 | # publish pushed changesets |
|
501 | 501 | self.publish = publish |
|
502 | 502 | |
|
503 | 503 | @util.propertycache |
|
504 | 504 | def futureheads(self): |
|
505 | 505 | """future remote heads if the changeset push succeeds""" |
|
506 | 506 | return self.outgoing.ancestorsof |
|
507 | 507 | |
|
508 | 508 | @util.propertycache |
|
509 | 509 | def fallbackheads(self): |
|
510 | 510 | """future remote heads if the changeset push fails""" |
|
511 | 511 | if self.revs is None: |
|
512 | 512 | # not target to push, all common are relevant |
|
513 | 513 | return self.outgoing.commonheads |
|
514 | 514 | unfi = self.repo.unfiltered() |
|
515 | 515 | # I want cheads = heads(::ancestorsof and ::commonheads) |
|
516 | 516 | # (ancestorsof is revs with secret changeset filtered out) |
|
517 | 517 | # |
|
518 | 518 | # This can be expressed as: |
|
519 | 519 | # cheads = ( (ancestorsof and ::commonheads) |
|
520 | 520 | # + (commonheads and ::ancestorsof))" |
|
521 | 521 | # ) |
|
522 | 522 | # |
|
523 | 523 | # while trying to push we already computed the following: |
|
524 | 524 | # common = (::commonheads) |
|
525 | 525 | # missing = ((commonheads::ancestorsof) - commonheads) |
|
526 | 526 | # |
|
527 | 527 | # We can pick: |
|
528 | 528 | # * ancestorsof part of common (::commonheads) |
|
529 | 529 | common = self.outgoing.common |
|
530 | 530 | rev = self.repo.changelog.index.rev |
|
531 | 531 | cheads = [node for node in self.revs if rev(node) in common] |
|
532 | 532 | # and |
|
533 | 533 | # * commonheads parents on missing |
|
534 | 534 | revset = unfi.set( |
|
535 | 535 | b'%ln and parents(roots(%ln))', |
|
536 | 536 | self.outgoing.commonheads, |
|
537 | 537 | self.outgoing.missing, |
|
538 | 538 | ) |
|
539 | 539 | cheads.extend(c.node() for c in revset) |
|
540 | 540 | return cheads |
|
541 | 541 | |
|
542 | 542 | @property |
|
543 | 543 | def commonheads(self): |
|
544 | 544 | """set of all common heads after changeset bundle push""" |
|
545 | 545 | if self.cgresult: |
|
546 | 546 | return self.futureheads |
|
547 | 547 | else: |
|
548 | 548 | return self.fallbackheads |
|
549 | 549 | |
|
550 | 550 | |
|
551 | 551 | # mapping of message used when pushing bookmark |
|
552 | 552 | bookmsgmap = { |
|
553 | 553 | b'update': ( |
|
554 | 554 | _(b"updating bookmark %s\n"), |
|
555 | 555 | _(b'updating bookmark %s failed!\n'), |
|
556 | 556 | ), |
|
557 | 557 | b'export': ( |
|
558 | 558 | _(b"exporting bookmark %s\n"), |
|
559 | 559 | _(b'exporting bookmark %s failed!\n'), |
|
560 | 560 | ), |
|
561 | 561 | b'delete': ( |
|
562 | 562 | _(b"deleting remote bookmark %s\n"), |
|
563 | 563 | _(b'deleting remote bookmark %s failed!\n'), |
|
564 | 564 | ), |
|
565 | 565 | } |
|
566 | 566 | |
|
567 | 567 | |
|
568 | 568 | def push( |
|
569 | 569 | repo, |
|
570 | 570 | remote, |
|
571 | 571 | force=False, |
|
572 | 572 | revs=None, |
|
573 | 573 | newbranch=False, |
|
574 | 574 | bookmarks=(), |
|
575 | 575 | publish=False, |
|
576 | 576 | opargs=None, |
|
577 | 577 | ): |
|
578 | 578 | '''Push outgoing changesets (limited by revs) from a local |
|
579 | 579 | repository to remote. Return an integer: |
|
580 | 580 | - None means nothing to push |
|
581 | 581 | - 0 means HTTP error |
|
582 | 582 | - 1 means we pushed and remote head count is unchanged *or* |
|
583 | 583 | we have outgoing changesets but refused to push |
|
584 | 584 | - other values as described by addchangegroup() |
|
585 | 585 | ''' |
|
586 | 586 | if opargs is None: |
|
587 | 587 | opargs = {} |
|
588 | 588 | pushop = pushoperation( |
|
589 | 589 | repo, |
|
590 | 590 | remote, |
|
591 | 591 | force, |
|
592 | 592 | revs, |
|
593 | 593 | newbranch, |
|
594 | 594 | bookmarks, |
|
595 | 595 | publish, |
|
596 | 596 | **pycompat.strkwargs(opargs) |
|
597 | 597 | ) |
|
598 | 598 | if pushop.remote.local(): |
|
599 | 599 | missing = ( |
|
600 | 600 | set(pushop.repo.requirements) - pushop.remote.local().supported |
|
601 | 601 | ) |
|
602 | 602 | if missing: |
|
603 | 603 | msg = _( |
|
604 | 604 | b"required features are not" |
|
605 | 605 | b" supported in the destination:" |
|
606 | 606 | b" %s" |
|
607 | 607 | ) % (b', '.join(sorted(missing))) |
|
608 | 608 | raise error.Abort(msg) |
|
609 | 609 | |
|
610 | 610 | if not pushop.remote.canpush(): |
|
611 | 611 | raise error.Abort(_(b"destination does not support push")) |
|
612 | 612 | |
|
613 | 613 | if not pushop.remote.capable(b'unbundle'): |
|
614 | 614 | raise error.Abort( |
|
615 | 615 | _( |
|
616 | 616 | b'cannot push: destination does not support the ' |
|
617 | 617 | b'unbundle wire protocol command' |
|
618 | 618 | ) |
|
619 | 619 | ) |
|
620 | 620 | |
|
621 | 621 | # get lock as we might write phase data |
|
622 | 622 | wlock = lock = None |
|
623 | 623 | try: |
|
624 | 624 | # bundle2 push may receive a reply bundle touching bookmarks |
|
625 | 625 | # requiring the wlock. Take it now to ensure proper ordering. |
|
626 | 626 | maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback') |
|
627 | 627 | if ( |
|
628 | 628 | (not _forcebundle1(pushop)) |
|
629 | 629 | and maypushback |
|
630 | 630 | and not bookmod.bookmarksinstore(repo) |
|
631 | 631 | ): |
|
632 | 632 | wlock = pushop.repo.wlock() |
|
633 | 633 | lock = pushop.repo.lock() |
|
634 | 634 | pushop.trmanager = transactionmanager( |
|
635 | 635 | pushop.repo, b'push-response', pushop.remote.url() |
|
636 | 636 | ) |
|
637 | 637 | except error.LockUnavailable as err: |
|
638 | 638 | # source repo cannot be locked. |
|
639 | 639 | # We do not abort the push, but just disable the local phase |
|
640 | 640 | # synchronisation. |
|
641 | 641 | msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr( |
|
642 | 642 | err |
|
643 | 643 | ) |
|
644 | 644 | pushop.ui.debug(msg) |
|
645 | 645 | |
|
646 | 646 | with wlock or util.nullcontextmanager(): |
|
647 | 647 | with lock or util.nullcontextmanager(): |
|
648 | 648 | with pushop.trmanager or util.nullcontextmanager(): |
|
649 | 649 | pushop.repo.checkpush(pushop) |
|
650 | 650 | _checkpublish(pushop) |
|
651 | 651 | _pushdiscovery(pushop) |
|
652 | 652 | if not pushop.force: |
|
653 | 653 | _checksubrepostate(pushop) |
|
654 | 654 | if not _forcebundle1(pushop): |
|
655 | 655 | _pushbundle2(pushop) |
|
656 | 656 | _pushchangeset(pushop) |
|
657 | 657 | _pushsyncphase(pushop) |
|
658 | 658 | _pushobsolete(pushop) |
|
659 | 659 | _pushbookmark(pushop) |
|
660 | 660 | |
|
661 | 661 | if repo.ui.configbool(b'experimental', b'remotenames'): |
|
662 | 662 | logexchange.pullremotenames(repo, remote) |
|
663 | 663 | |
|
664 | 664 | return pushop |
|
665 | 665 | |
|
666 | 666 | |
|
667 | 667 | # list of steps to perform discovery before push |
|
668 | 668 | pushdiscoveryorder = [] |
|
669 | 669 | |
|
670 | 670 | # Mapping between step name and function |
|
671 | 671 | # |
|
672 | 672 | # This exists to help extensions wrap steps if necessary |
|
673 | 673 | pushdiscoverymapping = {} |
|
674 | 674 | |
|
675 | 675 | |
|
676 | 676 | def pushdiscovery(stepname): |
|
677 | 677 | """decorator for function performing discovery before push |
|
678 | 678 | |
|
679 | 679 | The function is added to the step -> function mapping and appended to the |
|
680 | 680 | list of steps. Beware that decorated function will be added in order (this |
|
681 | 681 | may matter). |
|
682 | 682 | |
|
683 | 683 | You can only use this decorator for a new step, if you want to wrap a step |
|
684 | 684 | from an extension, change the pushdiscovery dictionary directly.""" |
|
685 | 685 | |
|
686 | 686 | def dec(func): |
|
687 | 687 | assert stepname not in pushdiscoverymapping |
|
688 | 688 | pushdiscoverymapping[stepname] = func |
|
689 | 689 | pushdiscoveryorder.append(stepname) |
|
690 | 690 | return func |
|
691 | 691 | |
|
692 | 692 | return dec |
|
693 | 693 | |
|
694 | 694 | |
|
695 | 695 | def _pushdiscovery(pushop): |
|
696 | 696 | """Run all discovery steps""" |
|
697 | 697 | for stepname in pushdiscoveryorder: |
|
698 | 698 | step = pushdiscoverymapping[stepname] |
|
699 | 699 | step(pushop) |
|
700 | 700 | |
|
701 | 701 | |
|
702 | 702 | def _checksubrepostate(pushop): |
|
703 | 703 | """Ensure all outgoing referenced subrepo revisions are present locally""" |
|
704 | 704 | for n in pushop.outgoing.missing: |
|
705 | 705 | ctx = pushop.repo[n] |
|
706 | 706 | |
|
707 | 707 | if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files(): |
|
708 | 708 | for subpath in sorted(ctx.substate): |
|
709 | 709 | sub = ctx.sub(subpath) |
|
710 | 710 | sub.verify(onpush=True) |
|
711 | 711 | |
|
712 | 712 | |
|
713 | 713 | @pushdiscovery(b'changeset') |
|
714 | 714 | def _pushdiscoverychangeset(pushop): |
|
715 | 715 | """discover the changeset that need to be pushed""" |
|
716 | 716 | fci = discovery.findcommonincoming |
|
717 | 717 | if pushop.revs: |
|
718 | 718 | commoninc = fci( |
|
719 | 719 | pushop.repo, |
|
720 | 720 | pushop.remote, |
|
721 | 721 | force=pushop.force, |
|
722 | 722 | ancestorsof=pushop.revs, |
|
723 | 723 | ) |
|
724 | 724 | else: |
|
725 | 725 | commoninc = fci(pushop.repo, pushop.remote, force=pushop.force) |
|
726 | 726 | common, inc, remoteheads = commoninc |
|
727 | 727 | fco = discovery.findcommonoutgoing |
|
728 | 728 | outgoing = fco( |
|
729 | 729 | pushop.repo, |
|
730 | 730 | pushop.remote, |
|
731 | 731 | onlyheads=pushop.revs, |
|
732 | 732 | commoninc=commoninc, |
|
733 | 733 | force=pushop.force, |
|
734 | 734 | ) |
|
735 | 735 | pushop.outgoing = outgoing |
|
736 | 736 | pushop.remoteheads = remoteheads |
|
737 | 737 | pushop.incoming = inc |
|
738 | 738 | |
|
739 | 739 | |
|
740 | 740 | @pushdiscovery(b'phase') |
|
741 | 741 | def _pushdiscoveryphase(pushop): |
|
742 | 742 | """discover the phase that needs to be pushed |
|
743 | 743 | |
|
744 | 744 | (computed for both success and failure case for changesets push)""" |
|
745 | 745 | outgoing = pushop.outgoing |
|
746 | 746 | unfi = pushop.repo.unfiltered() |
|
747 | 747 | remotephases = listkeys(pushop.remote, b'phases') |
|
748 | 748 | |
|
749 | 749 | if ( |
|
750 | 750 | pushop.ui.configbool(b'ui', b'_usedassubrepo') |
|
751 | 751 | and remotephases # server supports phases |
|
752 | 752 | and not pushop.outgoing.missing # no changesets to be pushed |
|
753 | 753 | and remotephases.get(b'publishing', False) |
|
754 | 754 | ): |
|
755 | 755 | # When: |
|
756 | 756 | # - this is a subrepo push |
|
757 | 757 | # - and remote support phase |
|
758 | 758 | # - and no changeset are to be pushed |
|
759 | 759 | # - and remote is publishing |
|
760 | 760 | # We may be in issue 3781 case! |
|
761 | 761 | # We drop the possible phase synchronisation done by |
|
762 | 762 | # courtesy to publish changesets possibly locally draft |
|
763 | 763 | # on the remote. |
|
764 | 764 | pushop.outdatedphases = [] |
|
765 | 765 | pushop.fallbackoutdatedphases = [] |
|
766 | 766 | return |
|
767 | 767 | |
|
768 | 768 | pushop.remotephases = phases.remotephasessummary( |
|
769 | 769 | pushop.repo, pushop.fallbackheads, remotephases |
|
770 | 770 | ) |
|
771 | 771 | droots = pushop.remotephases.draftroots |
|
772 | 772 | |
|
773 | 773 | extracond = b'' |
|
774 | 774 | if not pushop.remotephases.publishing: |
|
775 | 775 | extracond = b' and public()' |
|
776 | 776 | revset = b'heads((%%ln::%%ln) %s)' % extracond |
|
777 | 777 | # Get the list of all revs draft on remote by public here. |
|
778 | 778 | # XXX Beware that revset break if droots is not strictly |
|
779 | 779 | # XXX root we may want to ensure it is but it is costly |
|
780 | 780 | fallback = list(unfi.set(revset, droots, pushop.fallbackheads)) |
|
781 | 781 | if not pushop.remotephases.publishing and pushop.publish: |
|
782 | 782 | future = list( |
|
783 | 783 | unfi.set( |
|
784 | 784 | b'%ln and (not public() or %ln::)', pushop.futureheads, droots |
|
785 | 785 | ) |
|
786 | 786 | ) |
|
787 | 787 | elif not outgoing.missing: |
|
788 | 788 | future = fallback |
|
789 | 789 | else: |
|
790 | 790 | # adds changeset we are going to push as draft |
|
791 | 791 | # |
|
792 | 792 | # should not be necessary for publishing server, but because of an |
|
793 | 793 | # issue fixed in xxxxx we have to do it anyway. |
|
794 | 794 | fdroots = list( |
|
795 | 795 | unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots) |
|
796 | 796 | ) |
|
797 | 797 | fdroots = [f.node() for f in fdroots] |
|
798 | 798 | future = list(unfi.set(revset, fdroots, pushop.futureheads)) |
|
799 | 799 | pushop.outdatedphases = future |
|
800 | 800 | pushop.fallbackoutdatedphases = fallback |
|
801 | 801 | |
|
802 | 802 | |
|
803 | 803 | @pushdiscovery(b'obsmarker') |
|
804 | 804 | def _pushdiscoveryobsmarkers(pushop): |
|
805 | 805 | if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt): |
|
806 | 806 | return |
|
807 | 807 | |
|
808 | 808 | if not pushop.repo.obsstore: |
|
809 | 809 | return |
|
810 | 810 | |
|
811 | 811 | if b'obsolete' not in listkeys(pushop.remote, b'namespaces'): |
|
812 | 812 | return |
|
813 | 813 | |
|
814 | 814 | repo = pushop.repo |
|
815 | 815 | # very naive computation, that can be quite expensive on big repo. |
|
816 | 816 | # However: evolution is currently slow on them anyway. |
|
817 | 817 | nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads)) |
|
818 | 818 | pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes) |
|
819 | 819 | |
|
820 | 820 | |
|
821 | 821 | @pushdiscovery(b'bookmarks') |
|
822 | 822 | def _pushdiscoverybookmarks(pushop): |
|
823 | 823 | ui = pushop.ui |
|
824 | 824 | repo = pushop.repo.unfiltered() |
|
825 | 825 | remote = pushop.remote |
|
826 | 826 | ui.debug(b"checking for updated bookmarks\n") |
|
827 | 827 | ancestors = () |
|
828 | 828 | if pushop.revs: |
|
829 | 829 | revnums = pycompat.maplist(repo.changelog.rev, pushop.revs) |
|
830 | 830 | ancestors = repo.changelog.ancestors(revnums, inclusive=True) |
|
831 | 831 | |
|
832 | 832 | remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks')) |
|
833 | 833 | |
|
834 | 834 | explicit = { |
|
835 | 835 | repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks |
|
836 | 836 | } |
|
837 | 837 | |
|
838 | 838 | comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark) |
|
839 | 839 | return _processcompared(pushop, ancestors, explicit, remotebookmark, comp) |
|
840 | 840 | |
|
841 | 841 | |
|
842 | 842 | def _processcompared(pushop, pushed, explicit, remotebms, comp): |
|
843 | 843 | """take decision on bookmarks to push to the remote repo |
|
844 | 844 | |
|
845 | 845 | Exists to help extensions alter this behavior. |
|
846 | 846 | """ |
|
847 | 847 | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp |
|
848 | 848 | |
|
849 | 849 | repo = pushop.repo |
|
850 | 850 | |
|
851 | 851 | for b, scid, dcid in advsrc: |
|
852 | 852 | if b in explicit: |
|
853 | 853 | explicit.remove(b) |
|
854 | 854 | if not pushed or repo[scid].rev() in pushed: |
|
855 | 855 | pushop.outbookmarks.append((b, dcid, scid)) |
|
856 | 856 | # search added bookmark |
|
857 | 857 | for b, scid, dcid in addsrc: |
|
858 | 858 | if b in explicit: |
|
859 | 859 | explicit.remove(b) |
|
860 | 860 | if bookmod.isdivergent(b): |
|
861 | 861 | pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b) |
|
862 | 862 | pushop.bkresult = 2 |
|
863 | 863 | else: |
|
864 | 864 | pushop.outbookmarks.append((b, b'', scid)) |
|
865 | 865 | # search for overwritten bookmark |
|
866 | 866 | for b, scid, dcid in list(advdst) + list(diverge) + list(differ): |
|
867 | 867 | if b in explicit: |
|
868 | 868 | explicit.remove(b) |
|
869 | 869 | pushop.outbookmarks.append((b, dcid, scid)) |
|
870 | 870 | # search for bookmark to delete |
|
871 | 871 | for b, scid, dcid in adddst: |
|
872 | 872 | if b in explicit: |
|
873 | 873 | explicit.remove(b) |
|
874 | 874 | # treat as "deleted locally" |
|
875 | 875 | pushop.outbookmarks.append((b, dcid, b'')) |
|
876 | 876 | # identical bookmarks shouldn't get reported |
|
877 | 877 | for b, scid, dcid in same: |
|
878 | 878 | if b in explicit: |
|
879 | 879 | explicit.remove(b) |
|
880 | 880 | |
|
881 | 881 | if explicit: |
|
882 | 882 | explicit = sorted(explicit) |
|
883 | 883 | # we should probably list all of them |
|
884 | 884 | pushop.ui.warn( |
|
885 | 885 | _( |
|
886 | 886 | b'bookmark %s does not exist on the local ' |
|
887 | 887 | b'or remote repository!\n' |
|
888 | 888 | ) |
|
889 | 889 | % explicit[0] |
|
890 | 890 | ) |
|
891 | 891 | pushop.bkresult = 2 |
|
892 | 892 | |
|
893 | 893 | pushop.outbookmarks.sort() |
|
894 | 894 | |
|
895 | 895 | |
|
896 | 896 | def _pushcheckoutgoing(pushop): |
|
897 | 897 | outgoing = pushop.outgoing |
|
898 | 898 | unfi = pushop.repo.unfiltered() |
|
899 | 899 | if not outgoing.missing: |
|
900 | 900 | # nothing to push |
|
901 | 901 | scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) |
|
902 | 902 | return False |
|
903 | 903 | # something to push |
|
904 | 904 | if not pushop.force: |
|
905 | 905 | # if repo.obsstore == False --> no obsolete |
|
906 | 906 | # then, save the iteration |
|
907 | 907 | if unfi.obsstore: |
|
908 | 908 | # this message are here for 80 char limit reason |
|
909 | 909 | mso = _(b"push includes obsolete changeset: %s!") |
|
910 | 910 | mspd = _(b"push includes phase-divergent changeset: %s!") |
|
911 | 911 | mscd = _(b"push includes content-divergent changeset: %s!") |
|
912 | 912 | mst = { |
|
913 | 913 | b"orphan": _(b"push includes orphan changeset: %s!"), |
|
914 | 914 | b"phase-divergent": mspd, |
|
915 | 915 | b"content-divergent": mscd, |
|
916 | 916 | } |
|
917 | 917 | # If we are to push if there is at least one |
|
918 | 918 | # obsolete or unstable changeset in missing, at |
|
919 | 919 | # least one of the missinghead will be obsolete or |
|
920 | 920 | # unstable. So checking heads only is ok |
|
921 | 921 | for node in outgoing.ancestorsof: |
|
922 | 922 | ctx = unfi[node] |
|
923 | 923 | if ctx.obsolete(): |
|
924 | 924 | raise error.Abort(mso % ctx) |
|
925 | 925 | elif ctx.isunstable(): |
|
926 | 926 | # TODO print more than one instability in the abort |
|
927 | 927 | # message |
|
928 | 928 | raise error.Abort(mst[ctx.instabilities()[0]] % ctx) |
|
929 | 929 | |
|
930 | 930 | discovery.checkheads(pushop) |
|
931 | 931 | return True |
|
932 | 932 | |
|
933 | 933 | |
|
934 | 934 | # List of names of steps to perform for an outgoing bundle2, order matters. |
|
935 | 935 | b2partsgenorder = [] |
|
936 | 936 | |
|
937 | 937 | # Mapping between step name and function |
|
938 | 938 | # |
|
939 | 939 | # This exists to help extensions wrap steps if necessary |
|
940 | 940 | b2partsgenmapping = {} |
|
941 | 941 | |
|
942 | 942 | |
|
943 | 943 | def b2partsgenerator(stepname, idx=None): |
|
944 | 944 | """decorator for function generating bundle2 part |
|
945 | 945 | |
|
946 | 946 | The function is added to the step -> function mapping and appended to the |
|
947 | 947 | list of steps. Beware that decorated functions will be added in order |
|
948 | 948 | (this may matter). |
|
949 | 949 | |
|
950 | 950 | You can only use this decorator for new steps, if you want to wrap a step |
|
951 | 951 | from an extension, attack the b2partsgenmapping dictionary directly.""" |
|
952 | 952 | |
|
953 | 953 | def dec(func): |
|
954 | 954 | assert stepname not in b2partsgenmapping |
|
955 | 955 | b2partsgenmapping[stepname] = func |
|
956 | 956 | if idx is None: |
|
957 | 957 | b2partsgenorder.append(stepname) |
|
958 | 958 | else: |
|
959 | 959 | b2partsgenorder.insert(idx, stepname) |
|
960 | 960 | return func |
|
961 | 961 | |
|
962 | 962 | return dec |
|
963 | 963 | |
|
964 | 964 | |
|
965 | 965 | def _pushb2ctxcheckheads(pushop, bundler): |
|
966 | 966 | """Generate race condition checking parts |
|
967 | 967 | |
|
968 | 968 | Exists as an independent function to aid extensions |
|
969 | 969 | """ |
|
970 | 970 | # * 'force' do not check for push race, |
|
971 | 971 | # * if we don't push anything, there are nothing to check. |
|
972 | 972 | if not pushop.force and pushop.outgoing.ancestorsof: |
|
973 | 973 | allowunrelated = b'related' in bundler.capabilities.get( |
|
974 | 974 | b'checkheads', () |
|
975 | 975 | ) |
|
976 | 976 | emptyremote = pushop.pushbranchmap is None |
|
977 | 977 | if not allowunrelated or emptyremote: |
|
978 | 978 | bundler.newpart(b'check:heads', data=iter(pushop.remoteheads)) |
|
979 | 979 | else: |
|
980 | 980 | affected = set() |
|
981 | 981 | for branch, heads in pycompat.iteritems(pushop.pushbranchmap): |
|
982 | 982 | remoteheads, newheads, unsyncedheads, discardedheads = heads |
|
983 | 983 | if remoteheads is not None: |
|
984 | 984 | remote = set(remoteheads) |
|
985 | 985 | affected |= set(discardedheads) & remote |
|
986 | 986 | affected |= remote - set(newheads) |
|
987 | 987 | if affected: |
|
988 | 988 | data = iter(sorted(affected)) |
|
989 | 989 | bundler.newpart(b'check:updated-heads', data=data) |
|
990 | 990 | |
|
991 | 991 | |
|
992 | 992 | def _pushing(pushop): |
|
993 | 993 | """return True if we are pushing anything""" |
|
994 | 994 | return bool( |
|
995 | 995 | pushop.outgoing.missing |
|
996 | 996 | or pushop.outdatedphases |
|
997 | 997 | or pushop.outobsmarkers |
|
998 | 998 | or pushop.outbookmarks |
|
999 | 999 | ) |
|
1000 | 1000 | |
|
1001 | 1001 | |
|
1002 | 1002 | @b2partsgenerator(b'check-bookmarks') |
|
1003 | 1003 | def _pushb2checkbookmarks(pushop, bundler): |
|
1004 | 1004 | """insert bookmark move checking""" |
|
1005 | 1005 | if not _pushing(pushop) or pushop.force: |
|
1006 | 1006 | return |
|
1007 | 1007 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
1008 | 1008 | hasbookmarkcheck = b'bookmarks' in b2caps |
|
1009 | 1009 | if not (pushop.outbookmarks and hasbookmarkcheck): |
|
1010 | 1010 | return |
|
1011 | 1011 | data = [] |
|
1012 | 1012 | for book, old, new in pushop.outbookmarks: |
|
1013 | 1013 | data.append((book, old)) |
|
1014 | 1014 | checkdata = bookmod.binaryencode(data) |
|
1015 | 1015 | bundler.newpart(b'check:bookmarks', data=checkdata) |
|
1016 | 1016 | |
|
1017 | 1017 | |
|
1018 | 1018 | @b2partsgenerator(b'check-phases') |
|
1019 | 1019 | def _pushb2checkphases(pushop, bundler): |
|
1020 | 1020 | """insert phase move checking""" |
|
1021 | 1021 | if not _pushing(pushop) or pushop.force: |
|
1022 | 1022 | return |
|
1023 | 1023 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
1024 | 1024 | hasphaseheads = b'heads' in b2caps.get(b'phases', ()) |
|
1025 | 1025 | if pushop.remotephases is not None and hasphaseheads: |
|
1026 | 1026 | # check that the remote phase has not changed |
|
1027 | 1027 | checks = {p: [] for p in phases.allphases} |
|
1028 | 1028 | checks[phases.public].extend(pushop.remotephases.publicheads) |
|
1029 | 1029 | checks[phases.draft].extend(pushop.remotephases.draftroots) |
|
1030 | 1030 | if any(pycompat.itervalues(checks)): |
|
1031 | 1031 | for phase in checks: |
|
1032 | 1032 | checks[phase].sort() |
|
1033 | 1033 | checkdata = phases.binaryencode(checks) |
|
1034 | 1034 | bundler.newpart(b'check:phases', data=checkdata) |
|
1035 | 1035 | |
|
1036 | 1036 | |
|
1037 | 1037 | @b2partsgenerator(b'changeset') |
|
1038 | 1038 | def _pushb2ctx(pushop, bundler): |
|
1039 | 1039 | """handle changegroup push through bundle2 |
|
1040 | 1040 | |
|
1041 | 1041 | addchangegroup result is stored in the ``pushop.cgresult`` attribute. |
|
1042 | 1042 | """ |
|
1043 | 1043 | if b'changesets' in pushop.stepsdone: |
|
1044 | 1044 | return |
|
1045 | 1045 | pushop.stepsdone.add(b'changesets') |
|
1046 | 1046 | # Send known heads to the server for race detection. |
|
1047 | 1047 | if not _pushcheckoutgoing(pushop): |
|
1048 | 1048 | return |
|
1049 | 1049 | pushop.repo.prepushoutgoinghooks(pushop) |
|
1050 | 1050 | |
|
1051 | 1051 | _pushb2ctxcheckheads(pushop, bundler) |
|
1052 | 1052 | |
|
1053 | 1053 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
1054 | 1054 | version = b'01' |
|
1055 | 1055 | cgversions = b2caps.get(b'changegroup') |
|
1056 | 1056 | if cgversions: # 3.1 and 3.2 ship with an empty value |
|
1057 | 1057 | cgversions = [ |
|
1058 | 1058 | v |
|
1059 | 1059 | for v in cgversions |
|
1060 | 1060 | if v in changegroup.supportedoutgoingversions(pushop.repo) |
|
1061 | 1061 | ] |
|
1062 | 1062 | if not cgversions: |
|
1063 | 1063 | raise error.Abort(_(b'no common changegroup version')) |
|
1064 | 1064 | version = max(cgversions) |
|
1065 | 1065 | cgstream = changegroup.makestream( |
|
1066 | 1066 | pushop.repo, pushop.outgoing, version, b'push' |
|
1067 | 1067 | ) |
|
1068 | 1068 | cgpart = bundler.newpart(b'changegroup', data=cgstream) |
|
1069 | 1069 | if cgversions: |
|
1070 | 1070 | cgpart.addparam(b'version', version) |
|
1071 | if requirements.TREEMANIFEST_REQUIREMENT in pushop.repo.requirements: | |
|
1071 | if scmutil.istreemanifest(pushop.repo): | |
|
1072 | 1072 | cgpart.addparam(b'treemanifest', b'1') |
|
1073 | 1073 | if b'exp-sidedata-flag' in pushop.repo.requirements: |
|
1074 | 1074 | cgpart.addparam(b'exp-sidedata', b'1') |
|
1075 | 1075 | |
|
1076 | 1076 | def handlereply(op): |
|
1077 | 1077 | """extract addchangegroup returns from server reply""" |
|
1078 | 1078 | cgreplies = op.records.getreplies(cgpart.id) |
|
1079 | 1079 | assert len(cgreplies[b'changegroup']) == 1 |
|
1080 | 1080 | pushop.cgresult = cgreplies[b'changegroup'][0][b'return'] |
|
1081 | 1081 | |
|
1082 | 1082 | return handlereply |
|
1083 | 1083 | |
|
1084 | 1084 | |
|
1085 | 1085 | @b2partsgenerator(b'phase') |
|
1086 | 1086 | def _pushb2phases(pushop, bundler): |
|
1087 | 1087 | """handle phase push through bundle2""" |
|
1088 | 1088 | if b'phases' in pushop.stepsdone: |
|
1089 | 1089 | return |
|
1090 | 1090 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
1091 | 1091 | ui = pushop.repo.ui |
|
1092 | 1092 | |
|
1093 | 1093 | legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange') |
|
1094 | 1094 | haspushkey = b'pushkey' in b2caps |
|
1095 | 1095 | hasphaseheads = b'heads' in b2caps.get(b'phases', ()) |
|
1096 | 1096 | |
|
1097 | 1097 | if hasphaseheads and not legacyphase: |
|
1098 | 1098 | return _pushb2phaseheads(pushop, bundler) |
|
1099 | 1099 | elif haspushkey: |
|
1100 | 1100 | return _pushb2phasespushkey(pushop, bundler) |
|
1101 | 1101 | |
|
1102 | 1102 | |
|
1103 | 1103 | def _pushb2phaseheads(pushop, bundler): |
|
1104 | 1104 | """push phase information through a bundle2 - binary part""" |
|
1105 | 1105 | pushop.stepsdone.add(b'phases') |
|
1106 | 1106 | if pushop.outdatedphases: |
|
1107 | 1107 | updates = {p: [] for p in phases.allphases} |
|
1108 | 1108 | updates[0].extend(h.node() for h in pushop.outdatedphases) |
|
1109 | 1109 | phasedata = phases.binaryencode(updates) |
|
1110 | 1110 | bundler.newpart(b'phase-heads', data=phasedata) |
|
1111 | 1111 | |
|
1112 | 1112 | |
|
1113 | 1113 | def _pushb2phasespushkey(pushop, bundler): |
|
1114 | 1114 | """push phase information through a bundle2 - pushkey part""" |
|
1115 | 1115 | pushop.stepsdone.add(b'phases') |
|
1116 | 1116 | part2node = [] |
|
1117 | 1117 | |
|
1118 | 1118 | def handlefailure(pushop, exc): |
|
1119 | 1119 | targetid = int(exc.partid) |
|
1120 | 1120 | for partid, node in part2node: |
|
1121 | 1121 | if partid == targetid: |
|
1122 | 1122 | raise error.Abort(_(b'updating %s to public failed') % node) |
|
1123 | 1123 | |
|
1124 | 1124 | enc = pushkey.encode |
|
1125 | 1125 | for newremotehead in pushop.outdatedphases: |
|
1126 | 1126 | part = bundler.newpart(b'pushkey') |
|
1127 | 1127 | part.addparam(b'namespace', enc(b'phases')) |
|
1128 | 1128 | part.addparam(b'key', enc(newremotehead.hex())) |
|
1129 | 1129 | part.addparam(b'old', enc(b'%d' % phases.draft)) |
|
1130 | 1130 | part.addparam(b'new', enc(b'%d' % phases.public)) |
|
1131 | 1131 | part2node.append((part.id, newremotehead)) |
|
1132 | 1132 | pushop.pkfailcb[part.id] = handlefailure |
|
1133 | 1133 | |
|
1134 | 1134 | def handlereply(op): |
|
1135 | 1135 | for partid, node in part2node: |
|
1136 | 1136 | partrep = op.records.getreplies(partid) |
|
1137 | 1137 | results = partrep[b'pushkey'] |
|
1138 | 1138 | assert len(results) <= 1 |
|
1139 | 1139 | msg = None |
|
1140 | 1140 | if not results: |
|
1141 | 1141 | msg = _(b'server ignored update of %s to public!\n') % node |
|
1142 | 1142 | elif not int(results[0][b'return']): |
|
1143 | 1143 | msg = _(b'updating %s to public failed!\n') % node |
|
1144 | 1144 | if msg is not None: |
|
1145 | 1145 | pushop.ui.warn(msg) |
|
1146 | 1146 | |
|
1147 | 1147 | return handlereply |
|
1148 | 1148 | |
|
1149 | 1149 | |
|
1150 | 1150 | @b2partsgenerator(b'obsmarkers') |
|
1151 | 1151 | def _pushb2obsmarkers(pushop, bundler): |
|
1152 | 1152 | if b'obsmarkers' in pushop.stepsdone: |
|
1153 | 1153 | return |
|
1154 | 1154 | remoteversions = bundle2.obsmarkersversion(bundler.capabilities) |
|
1155 | 1155 | if obsolete.commonversion(remoteversions) is None: |
|
1156 | 1156 | return |
|
1157 | 1157 | pushop.stepsdone.add(b'obsmarkers') |
|
1158 | 1158 | if pushop.outobsmarkers: |
|
1159 | 1159 | markers = obsutil.sortedmarkers(pushop.outobsmarkers) |
|
1160 | 1160 | bundle2.buildobsmarkerspart(bundler, markers) |
|
1161 | 1161 | |
|
1162 | 1162 | |
|
1163 | 1163 | @b2partsgenerator(b'bookmarks') |
|
1164 | 1164 | def _pushb2bookmarks(pushop, bundler): |
|
1165 | 1165 | """handle bookmark push through bundle2""" |
|
1166 | 1166 | if b'bookmarks' in pushop.stepsdone: |
|
1167 | 1167 | return |
|
1168 | 1168 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
1169 | 1169 | |
|
1170 | 1170 | legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange') |
|
1171 | 1171 | legacybooks = b'bookmarks' in legacy |
|
1172 | 1172 | |
|
1173 | 1173 | if not legacybooks and b'bookmarks' in b2caps: |
|
1174 | 1174 | return _pushb2bookmarkspart(pushop, bundler) |
|
1175 | 1175 | elif b'pushkey' in b2caps: |
|
1176 | 1176 | return _pushb2bookmarkspushkey(pushop, bundler) |
|
1177 | 1177 | |
|
1178 | 1178 | |
|
1179 | 1179 | def _bmaction(old, new): |
|
1180 | 1180 | """small utility for bookmark pushing""" |
|
1181 | 1181 | if not old: |
|
1182 | 1182 | return b'export' |
|
1183 | 1183 | elif not new: |
|
1184 | 1184 | return b'delete' |
|
1185 | 1185 | return b'update' |
|
1186 | 1186 | |
|
1187 | 1187 | |
|
1188 | 1188 | def _abortonsecretctx(pushop, node, b): |
|
1189 | 1189 | """abort if a given bookmark points to a secret changeset""" |
|
1190 | 1190 | if node and pushop.repo[node].phase() == phases.secret: |
|
1191 | 1191 | raise error.Abort( |
|
1192 | 1192 | _(b'cannot push bookmark %s as it points to a secret changeset') % b |
|
1193 | 1193 | ) |
|
1194 | 1194 | |
|
1195 | 1195 | |
|
1196 | 1196 | def _pushb2bookmarkspart(pushop, bundler): |
|
1197 | 1197 | pushop.stepsdone.add(b'bookmarks') |
|
1198 | 1198 | if not pushop.outbookmarks: |
|
1199 | 1199 | return |
|
1200 | 1200 | |
|
1201 | 1201 | allactions = [] |
|
1202 | 1202 | data = [] |
|
1203 | 1203 | for book, old, new in pushop.outbookmarks: |
|
1204 | 1204 | _abortonsecretctx(pushop, new, book) |
|
1205 | 1205 | data.append((book, new)) |
|
1206 | 1206 | allactions.append((book, _bmaction(old, new))) |
|
1207 | 1207 | checkdata = bookmod.binaryencode(data) |
|
1208 | 1208 | bundler.newpart(b'bookmarks', data=checkdata) |
|
1209 | 1209 | |
|
1210 | 1210 | def handlereply(op): |
|
1211 | 1211 | ui = pushop.ui |
|
1212 | 1212 | # if success |
|
1213 | 1213 | for book, action in allactions: |
|
1214 | 1214 | ui.status(bookmsgmap[action][0] % book) |
|
1215 | 1215 | |
|
1216 | 1216 | return handlereply |
|
1217 | 1217 | |
|
1218 | 1218 | |
|
1219 | 1219 | def _pushb2bookmarkspushkey(pushop, bundler): |
|
1220 | 1220 | pushop.stepsdone.add(b'bookmarks') |
|
1221 | 1221 | part2book = [] |
|
1222 | 1222 | enc = pushkey.encode |
|
1223 | 1223 | |
|
1224 | 1224 | def handlefailure(pushop, exc): |
|
1225 | 1225 | targetid = int(exc.partid) |
|
1226 | 1226 | for partid, book, action in part2book: |
|
1227 | 1227 | if partid == targetid: |
|
1228 | 1228 | raise error.Abort(bookmsgmap[action][1].rstrip() % book) |
|
1229 | 1229 | # we should not be called for part we did not generated |
|
1230 | 1230 | assert False |
|
1231 | 1231 | |
|
1232 | 1232 | for book, old, new in pushop.outbookmarks: |
|
1233 | 1233 | _abortonsecretctx(pushop, new, book) |
|
1234 | 1234 | part = bundler.newpart(b'pushkey') |
|
1235 | 1235 | part.addparam(b'namespace', enc(b'bookmarks')) |
|
1236 | 1236 | part.addparam(b'key', enc(book)) |
|
1237 | 1237 | part.addparam(b'old', enc(hex(old))) |
|
1238 | 1238 | part.addparam(b'new', enc(hex(new))) |
|
1239 | 1239 | action = b'update' |
|
1240 | 1240 | if not old: |
|
1241 | 1241 | action = b'export' |
|
1242 | 1242 | elif not new: |
|
1243 | 1243 | action = b'delete' |
|
1244 | 1244 | part2book.append((part.id, book, action)) |
|
1245 | 1245 | pushop.pkfailcb[part.id] = handlefailure |
|
1246 | 1246 | |
|
1247 | 1247 | def handlereply(op): |
|
1248 | 1248 | ui = pushop.ui |
|
1249 | 1249 | for partid, book, action in part2book: |
|
1250 | 1250 | partrep = op.records.getreplies(partid) |
|
1251 | 1251 | results = partrep[b'pushkey'] |
|
1252 | 1252 | assert len(results) <= 1 |
|
1253 | 1253 | if not results: |
|
1254 | 1254 | pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book) |
|
1255 | 1255 | else: |
|
1256 | 1256 | ret = int(results[0][b'return']) |
|
1257 | 1257 | if ret: |
|
1258 | 1258 | ui.status(bookmsgmap[action][0] % book) |
|
1259 | 1259 | else: |
|
1260 | 1260 | ui.warn(bookmsgmap[action][1] % book) |
|
1261 | 1261 | if pushop.bkresult is not None: |
|
1262 | 1262 | pushop.bkresult = 1 |
|
1263 | 1263 | |
|
1264 | 1264 | return handlereply |
|
1265 | 1265 | |
|
1266 | 1266 | |
|
1267 | 1267 | @b2partsgenerator(b'pushvars', idx=0) |
|
1268 | 1268 | def _getbundlesendvars(pushop, bundler): |
|
1269 | 1269 | '''send shellvars via bundle2''' |
|
1270 | 1270 | pushvars = pushop.pushvars |
|
1271 | 1271 | if pushvars: |
|
1272 | 1272 | shellvars = {} |
|
1273 | 1273 | for raw in pushvars: |
|
1274 | 1274 | if b'=' not in raw: |
|
1275 | 1275 | msg = ( |
|
1276 | 1276 | b"unable to parse variable '%s', should follow " |
|
1277 | 1277 | b"'KEY=VALUE' or 'KEY=' format" |
|
1278 | 1278 | ) |
|
1279 | 1279 | raise error.Abort(msg % raw) |
|
1280 | 1280 | k, v = raw.split(b'=', 1) |
|
1281 | 1281 | shellvars[k] = v |
|
1282 | 1282 | |
|
1283 | 1283 | part = bundler.newpart(b'pushvars') |
|
1284 | 1284 | |
|
1285 | 1285 | for key, value in pycompat.iteritems(shellvars): |
|
1286 | 1286 | part.addparam(key, value, mandatory=False) |
|
1287 | 1287 | |
|
1288 | 1288 | |
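# Editor's note: an illustrative sketch of how _getbundlesendvars() above
# parses --pushvars arguments. Each raw value is split on the first b'='
# only, so values may themselves contain b'='; anything without b'=' aborts.
#
#   b'DEBUG=1'   -> {b'DEBUG': b'1'}
#   b'MSG=a=b'   -> {b'MSG': b'a=b'}
#   b'EMPTY='    -> {b'EMPTY': b''}
#   b'NOEQUALS'  -> error.Abort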
|
1289 | 1289 | def _pushbundle2(pushop): |
|
1290 | 1290 | """push data to the remote using bundle2 |
|
1291 | 1291 | |
|
1292 | 1292 | The only currently supported type of data is changegroup but this will |
|
1293 | 1293 | evolve in the future.""" |
|
1294 | 1294 | bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote)) |
|
1295 | 1295 | pushback = pushop.trmanager and pushop.ui.configbool( |
|
1296 | 1296 | b'experimental', b'bundle2.pushback' |
|
1297 | 1297 | ) |
|
1298 | 1298 | |
|
1299 | 1299 | # create reply capability |
|
1300 | 1300 | capsblob = bundle2.encodecaps( |
|
1301 | 1301 | bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client') |
|
1302 | 1302 | ) |
|
1303 | 1303 | bundler.newpart(b'replycaps', data=capsblob) |
|
1304 | 1304 | replyhandlers = [] |
|
1305 | 1305 | for partgenname in b2partsgenorder: |
|
1306 | 1306 | partgen = b2partsgenmapping[partgenname] |
|
1307 | 1307 | ret = partgen(pushop, bundler) |
|
1308 | 1308 | if callable(ret): |
|
1309 | 1309 | replyhandlers.append(ret) |
|
1310 | 1310 | # do not push if nothing to push |
|
1311 | 1311 | if bundler.nbparts <= 1: |
|
1312 | 1312 | return |
|
1313 | 1313 | stream = util.chunkbuffer(bundler.getchunks()) |
|
1314 | 1314 | try: |
|
1315 | 1315 | try: |
|
1316 | 1316 | with pushop.remote.commandexecutor() as e: |
|
1317 | 1317 | reply = e.callcommand( |
|
1318 | 1318 | b'unbundle', |
|
1319 | 1319 | { |
|
1320 | 1320 | b'bundle': stream, |
|
1321 | 1321 | b'heads': [b'force'], |
|
1322 | 1322 | b'url': pushop.remote.url(), |
|
1323 | 1323 | }, |
|
1324 | 1324 | ).result() |
|
1325 | 1325 | except error.BundleValueError as exc: |
|
1326 | 1326 | raise error.Abort(_(b'missing support for %s') % exc) |
|
1327 | 1327 | try: |
|
1328 | 1328 | trgetter = None |
|
1329 | 1329 | if pushback: |
|
1330 | 1330 | trgetter = pushop.trmanager.transaction |
|
1331 | 1331 | op = bundle2.processbundle(pushop.repo, reply, trgetter) |
|
1332 | 1332 | except error.BundleValueError as exc: |
|
1333 | 1333 | raise error.Abort(_(b'missing support for %s') % exc) |
|
1334 | 1334 | except bundle2.AbortFromPart as exc: |
|
1335 | 1335 | pushop.ui.status(_(b'remote: %s\n') % exc) |
|
1336 | 1336 | if exc.hint is not None: |
|
1337 | 1337 | pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint)) |
|
1338 | 1338 | raise error.Abort(_(b'push failed on remote')) |
|
1339 | 1339 | except error.PushkeyFailed as exc: |
|
1340 | 1340 | partid = int(exc.partid) |
|
1341 | 1341 | if partid not in pushop.pkfailcb: |
|
1342 | 1342 | raise |
|
1343 | 1343 | pushop.pkfailcb[partid](pushop, exc) |
|
1344 | 1344 | for rephand in replyhandlers: |
|
1345 | 1345 | rephand(op) |
|
1346 | 1346 | |
|
1347 | 1347 | |
|
1348 | 1348 | def _pushchangeset(pushop): |
|
1349 | 1349 | """Make the actual push of changeset bundle to remote repo""" |
|
1350 | 1350 | if b'changesets' in pushop.stepsdone: |
|
1351 | 1351 | return |
|
1352 | 1352 | pushop.stepsdone.add(b'changesets') |
|
1353 | 1353 | if not _pushcheckoutgoing(pushop): |
|
1354 | 1354 | return |
|
1355 | 1355 | |
|
1356 | 1356 | # Should have verified this in push(). |
|
1357 | 1357 | assert pushop.remote.capable(b'unbundle') |
|
1358 | 1358 | |
|
1359 | 1359 | pushop.repo.prepushoutgoinghooks(pushop) |
|
1360 | 1360 | outgoing = pushop.outgoing |
|
1361 | 1361 | # TODO: get bundlecaps from remote |
|
1362 | 1362 | bundlecaps = None |
|
1363 | 1363 | # create a changegroup from local |
|
1364 | 1364 | if pushop.revs is None and not ( |
|
1365 | 1365 | outgoing.excluded or pushop.repo.changelog.filteredrevs |
|
1366 | 1366 | ): |
|
1367 | 1367 | # push everything, |
|
1368 | 1368 | # use the fast path, no race possible on push |
|
1369 | 1369 | cg = changegroup.makechangegroup( |
|
1370 | 1370 | pushop.repo, |
|
1371 | 1371 | outgoing, |
|
1372 | 1372 | b'01', |
|
1373 | 1373 | b'push', |
|
1374 | 1374 | fastpath=True, |
|
1375 | 1375 | bundlecaps=bundlecaps, |
|
1376 | 1376 | ) |
|
1377 | 1377 | else: |
|
1378 | 1378 | cg = changegroup.makechangegroup( |
|
1379 | 1379 | pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps |
|
1380 | 1380 | ) |
|
1381 | 1381 | |
|
1382 | 1382 | # apply changegroup to remote |
|
1383 | 1383 | # local repo finds heads on server, finds out what |
|
1384 | 1384 | # revs it must push. once revs transferred, if server |
|
1385 | 1385 | # finds it has different heads (someone else won |
|
1386 | 1386 | # commit/push race), server aborts. |
|
1387 | 1387 | if pushop.force: |
|
1388 | 1388 | remoteheads = [b'force'] |
|
1389 | 1389 | else: |
|
1390 | 1390 | remoteheads = pushop.remoteheads |
|
1391 | 1391 | # ssh: return remote's addchangegroup() |
|
1392 | 1392 | # http: return remote's addchangegroup() or 0 for error |
|
1393 | 1393 | pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url()) |
|
1394 | 1394 | |
|
1395 | 1395 | |
|
1396 | 1396 | def _pushsyncphase(pushop): |
|
1397 | 1397 | """synchronise phase information locally and remotely""" |
|
1398 | 1398 | cheads = pushop.commonheads |
|
1399 | 1399 | # even when we don't push, exchanging phase data is useful |
|
1400 | 1400 | remotephases = listkeys(pushop.remote, b'phases') |
|
1401 | 1401 | if ( |
|
1402 | 1402 | pushop.ui.configbool(b'ui', b'_usedassubrepo') |
|
1403 | 1403 | and remotephases # server supports phases |
|
1404 | 1404 | and pushop.cgresult is None # nothing was pushed |
|
1405 | 1405 | and remotephases.get(b'publishing', False) |
|
1406 | 1406 | ): |
|
1407 | 1407 | # When: |
|
1408 | 1408 | # - this is a subrepo push |
|
1409 | 1409 | # - and the remote supports phases
|
1410 | 1410 | # - and no changeset was pushed |
|
1411 | 1411 | # - and remote is publishing |
|
1412 | 1412 | # We may be in issue 3871 case! |
|
1413 | 1413 | # We drop the detailed phase synchronisation and simply treat the

1414 | 1414 | # remote as publishing, so changesets present on the remote but

1415 | 1415 | # still draft locally get published.
|
1416 | 1416 | remotephases = {b'publishing': b'True'} |
|
1417 | 1417 | if not remotephases: # old server, or public-only reply from non-publishing
|
1418 | 1418 | _localphasemove(pushop, cheads) |
|
1419 | 1419 | # don't push any phase data as there is nothing to push |
|
1420 | 1420 | else: |
|
1421 | 1421 | ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases) |
|
1422 | 1422 | pheads, droots = ana |
|
1423 | 1423 | ### Apply remote phase on local |
|
1424 | 1424 | if remotephases.get(b'publishing', False): |
|
1425 | 1425 | _localphasemove(pushop, cheads) |
|
1426 | 1426 | else: # publish = False |
|
1427 | 1427 | _localphasemove(pushop, pheads) |
|
1428 | 1428 | _localphasemove(pushop, cheads, phases.draft) |
|
1429 | 1429 | ### Apply local phase on remote |
|
1430 | 1430 | |
|
1431 | 1431 | if pushop.cgresult: |
|
1432 | 1432 | if b'phases' in pushop.stepsdone: |
|
1433 | 1433 | # phases already pushed through bundle2
|
1434 | 1434 | return |
|
1435 | 1435 | outdated = pushop.outdatedphases |
|
1436 | 1436 | else: |
|
1437 | 1437 | outdated = pushop.fallbackoutdatedphases |
|
1438 | 1438 | |
|
1439 | 1439 | pushop.stepsdone.add(b'phases') |
|
1440 | 1440 | |
|
1441 | 1441 | # filter heads already turned public by the push |
|
1442 | 1442 | outdated = [c for c in outdated if c.node() not in pheads] |
|
1443 | 1443 | # fallback to independent pushkey command |
|
1444 | 1444 | for newremotehead in outdated: |
|
1445 | 1445 | with pushop.remote.commandexecutor() as e: |
|
1446 | 1446 | r = e.callcommand( |
|
1447 | 1447 | b'pushkey', |
|
1448 | 1448 | { |
|
1449 | 1449 | b'namespace': b'phases', |
|
1450 | 1450 | b'key': newremotehead.hex(), |
|
1451 | 1451 | b'old': b'%d' % phases.draft, |
|
1452 | 1452 | b'new': b'%d' % phases.public, |
|
1453 | 1453 | }, |
|
1454 | 1454 | ).result() |
|
1455 | 1455 | |
|
1456 | 1456 | if not r: |
|
1457 | 1457 | pushop.ui.warn( |
|
1458 | 1458 | _(b'updating %s to public failed!\n') % newremotehead |
|
1459 | 1459 | ) |
|
1460 | 1460 | |
|
1461 | 1461 | |
|
1462 | 1462 | def _localphasemove(pushop, nodes, phase=phases.public): |
|
1463 | 1463 | """move <nodes> to <phase> in the local source repo""" |
|
1464 | 1464 | if pushop.trmanager: |
|
1465 | 1465 | phases.advanceboundary( |
|
1466 | 1466 | pushop.repo, pushop.trmanager.transaction(), phase, nodes |
|
1467 | 1467 | ) |
|
1468 | 1468 | else: |
|
1469 | 1469 | # repo is not locked, do not change any phases! |
|
1470 | 1470 | # Informs the user that phases should have been moved when |
|
1471 | 1471 | # applicable. |
|
1472 | 1472 | actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()] |
|
1473 | 1473 | phasestr = phases.phasenames[phase] |
|
1474 | 1474 | if actualmoves: |
|
1475 | 1475 | pushop.ui.status( |
|
1476 | 1476 | _( |
|
1477 | 1477 | b'cannot lock source repo, skipping ' |
|
1478 | 1478 | b'local %s phase update\n' |
|
1479 | 1479 | ) |
|
1480 | 1480 | % phasestr |
|
1481 | 1481 | ) |
|
1482 | 1482 | |
|
1483 | 1483 | |
|
1484 | 1484 | def _pushobsolete(pushop): |
|
1485 | 1485 | """utility function to push obsolete markers to a remote""" |
|
1486 | 1486 | if b'obsmarkers' in pushop.stepsdone: |
|
1487 | 1487 | return |
|
1488 | 1488 | repo = pushop.repo |
|
1489 | 1489 | remote = pushop.remote |
|
1490 | 1490 | pushop.stepsdone.add(b'obsmarkers') |
|
1491 | 1491 | if pushop.outobsmarkers: |
|
1492 | 1492 | pushop.ui.debug(b'try to push obsolete markers to remote\n') |
|
1493 | 1493 | rslts = [] |
|
1494 | 1494 | markers = obsutil.sortedmarkers(pushop.outobsmarkers) |
|
1495 | 1495 | remotedata = obsolete._pushkeyescape(markers) |
|
1496 | 1496 | for key in sorted(remotedata, reverse=True): |
|
1497 | 1497 | # reverse sort to ensure we end with dump0 |
|
1498 | 1498 | data = remotedata[key] |
|
1499 | 1499 | rslts.append(remote.pushkey(b'obsolete', key, b'', data)) |
|
1500 | 1500 | if [r for r in rslts if not r]: |
|
1501 | 1501 | msg = _(b'failed to push some obsolete markers!\n') |
|
1502 | 1502 | repo.ui.warn(msg) |
|
1503 | 1503 | |
|
1504 | 1504 | |
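# Editor's note: an illustrative sketch of why _pushobsolete() above walks
# the escaped pushkey payloads in reverse-sorted order. The payload keys
# produced by obsolete._pushkeyescape() are named b'dump0', b'dump1', ...,
# and reverse sorting guarantees the iteration ends on b'dump0':
#
#   sorted([b'dump0', b'dump1', b'dump2'], reverse=True)
#   # -> [b'dump2', b'dump1', b'dump0']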
|
1505 | 1505 | def _pushbookmark(pushop): |
|
1506 | 1506 | """Update bookmark position on remote""" |
|
1507 | 1507 | if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone: |
|
1508 | 1508 | return |
|
1509 | 1509 | pushop.stepsdone.add(b'bookmarks') |
|
1510 | 1510 | ui = pushop.ui |
|
1511 | 1511 | remote = pushop.remote |
|
1512 | 1512 | |
|
1513 | 1513 | for b, old, new in pushop.outbookmarks: |
|
1514 | 1514 | action = b'update' |
|
1515 | 1515 | if not old: |
|
1516 | 1516 | action = b'export' |
|
1517 | 1517 | elif not new: |
|
1518 | 1518 | action = b'delete' |
|
1519 | 1519 | |
|
1520 | 1520 | with remote.commandexecutor() as e: |
|
1521 | 1521 | r = e.callcommand( |
|
1522 | 1522 | b'pushkey', |
|
1523 | 1523 | { |
|
1524 | 1524 | b'namespace': b'bookmarks', |
|
1525 | 1525 | b'key': b, |
|
1526 | 1526 | b'old': hex(old), |
|
1527 | 1527 | b'new': hex(new), |
|
1528 | 1528 | }, |
|
1529 | 1529 | ).result() |
|
1530 | 1530 | |
|
1531 | 1531 | if r: |
|
1532 | 1532 | ui.status(bookmsgmap[action][0] % b) |
|
1533 | 1533 | else: |
|
1534 | 1534 | ui.warn(bookmsgmap[action][1] % b) |
|
1535 | 1535 | # discovery can have set the value from an invalid entry
|
1536 | 1536 | if pushop.bkresult is not None: |
|
1537 | 1537 | pushop.bkresult = 1 |
|
1538 | 1538 | |
|
1539 | 1539 | |
|
1540 | 1540 | class pulloperation(object): |
|
1541 | 1541 | """An object that represents a single pull operation

1542 | 1542 |

1543 | 1543 | Its purpose is to carry pull-related state and very common operations.

1544 | 1544 |

1545 | 1545 | A new one should be created at the beginning of each pull and discarded

1546 | 1546 | afterward.
|
1547 | 1547 | """ |
|
1548 | 1548 | |
|
1549 | 1549 | def __init__( |
|
1550 | 1550 | self, |
|
1551 | 1551 | repo, |
|
1552 | 1552 | remote, |
|
1553 | 1553 | heads=None, |
|
1554 | 1554 | force=False, |
|
1555 | 1555 | bookmarks=(), |
|
1556 | 1556 | remotebookmarks=None, |
|
1557 | 1557 | streamclonerequested=None, |
|
1558 | 1558 | includepats=None, |
|
1559 | 1559 | excludepats=None, |
|
1560 | 1560 | depth=None, |
|
1561 | 1561 | ): |
|
1562 | 1562 | # repo we pull into |
|
1563 | 1563 | self.repo = repo |
|
1564 | 1564 | # repo we pull from |
|
1565 | 1565 | self.remote = remote |
|
1566 | 1566 | # revision we try to pull (None is "all") |
|
1567 | 1567 | self.heads = heads |
|
1568 | 1568 | # bookmark pulled explicitly |
|
1569 | 1569 | self.explicitbookmarks = [ |
|
1570 | 1570 | repo._bookmarks.expandname(bookmark) for bookmark in bookmarks |
|
1571 | 1571 | ] |
|
1572 | 1572 | # do we force pull? |
|
1573 | 1573 | self.force = force |
|
1574 | 1574 | # whether a streaming clone was requested |
|
1575 | 1575 | self.streamclonerequested = streamclonerequested |
|
1576 | 1576 | # transaction manager |
|
1577 | 1577 | self.trmanager = None |
|
1578 | 1578 | # set of common changeset between local and remote before pull |
|
1579 | 1579 | self.common = None |
|
1580 | 1580 | # set of pulled head |
|
1581 | 1581 | self.rheads = None |
|
1582 | 1582 | # list of missing changeset to fetch remotely |
|
1583 | 1583 | self.fetch = None |
|
1584 | 1584 | # remote bookmarks data |
|
1585 | 1585 | self.remotebookmarks = remotebookmarks |
|
1586 | 1586 | # result of changegroup pulling (used as return code by pull) |
|
1587 | 1587 | self.cgresult = None |
|
1588 | 1588 | # list of step already done |
|
1589 | 1589 | self.stepsdone = set() |
|
1590 | 1590 | # Whether we attempted a clone from pre-generated bundles. |
|
1591 | 1591 | self.clonebundleattempted = False |
|
1592 | 1592 | # Set of file patterns to include. |
|
1593 | 1593 | self.includepats = includepats |
|
1594 | 1594 | # Set of file patterns to exclude. |
|
1595 | 1595 | self.excludepats = excludepats |
|
1596 | 1596 | # Number of ancestor changesets to pull from each pulled head. |
|
1597 | 1597 | self.depth = depth |
|
1598 | 1598 | |
|
1599 | 1599 | @util.propertycache |
|
1600 | 1600 | def pulledsubset(self): |
|
1601 | 1601 | """heads of the set of changesets targeted by the pull"""
|
1602 | 1602 | # compute target subset |
|
1603 | 1603 | if self.heads is None: |
|
1604 | 1604 | # We pulled everything possible
|
1605 | 1605 | # sync on everything common |
|
1606 | 1606 | c = set(self.common) |
|
1607 | 1607 | ret = list(self.common) |
|
1608 | 1608 | for n in self.rheads: |
|
1609 | 1609 | if n not in c: |
|
1610 | 1610 | ret.append(n) |
|
1611 | 1611 | return ret |
|
1612 | 1612 | else: |
|
1613 | 1613 | # We pulled a specific subset |
|
1614 | 1614 | # sync on this subset |
|
1615 | 1615 | return self.heads |
|
1616 | 1616 | |
|
1617 | 1617 | @util.propertycache |
|
1618 | 1618 | def canusebundle2(self): |
|
1619 | 1619 | return not _forcebundle1(self) |
|
1620 | 1620 | |
|
1621 | 1621 | @util.propertycache |
|
1622 | 1622 | def remotebundle2caps(self): |
|
1623 | 1623 | return bundle2.bundle2caps(self.remote) |
|
1624 | 1624 | |
|
1625 | 1625 | def gettransaction(self): |
|
1626 | 1626 | # deprecated; talk to trmanager directly |
|
1627 | 1627 | return self.trmanager.transaction() |
|
1628 | 1628 | |
|
1629 | 1629 | |
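# Editor's note: an illustrative sketch of pulloperation.pulledsubset. For
# a full pull (heads is None) it is the common set plus any pulled remote
# head not already in it, in that order; for a partial pull it is simply
# the requested heads. With placeholder nodes n1..n4:
#
#   common = [n1, n2], rheads = [n2, n3]  ->  [n1, n2, n3]
#   heads  = [n4]                         ->  [n4]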
|
1630 | 1630 | class transactionmanager(util.transactional): |
|
1631 | 1631 | """An object to manage the life cycle of a transaction |
|
1632 | 1632 | |
|
1633 | 1633 | It creates the transaction on demand and calls the appropriate hooks when |
|
1634 | 1634 | closing the transaction.""" |
|
1635 | 1635 | |
|
1636 | 1636 | def __init__(self, repo, source, url): |
|
1637 | 1637 | self.repo = repo |
|
1638 | 1638 | self.source = source |
|
1639 | 1639 | self.url = url |
|
1640 | 1640 | self._tr = None |
|
1641 | 1641 | |
|
1642 | 1642 | def transaction(self): |
|
1643 | 1643 | """Return an open transaction object, constructing if necessary""" |
|
1644 | 1644 | if not self._tr: |
|
1645 | 1645 | trname = b'%s\n%s' % (self.source, util.hidepassword(self.url)) |
|
1646 | 1646 | self._tr = self.repo.transaction(trname) |
|
1647 | 1647 | self._tr.hookargs[b'source'] = self.source |
|
1648 | 1648 | self._tr.hookargs[b'url'] = self.url |
|
1649 | 1649 | return self._tr |
|
1650 | 1650 | |
|
1651 | 1651 | def close(self): |
|
1652 | 1652 | """close transaction if created""" |
|
1653 | 1653 | if self._tr is not None: |
|
1654 | 1654 | self._tr.close() |
|
1655 | 1655 | |
|
1656 | 1656 | def release(self): |
|
1657 | 1657 | """release transaction if created""" |
|
1658 | 1658 | if self._tr is not None: |
|
1659 | 1659 | self._tr.release() |
|
1660 | 1660 | |
|
1661 | 1661 | |
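# Editor's note: an illustrative sketch of transactionmanager. The
# transaction is created lazily, so callers that end up transferring
# nothing never open one. Assuming the usual context-manager behaviour of
# util.transactional (close on success, release in all cases), pull()
# below can simply write ``with wlock, repo.lock(), pullop.trmanager:``.
#
#   trmanager = transactionmanager(repo, b'pull', remote.url())
#   with trmanager:
#       ...                           # no transaction opened yet
#       tr = trmanager.transaction()  # first call creates it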
|
1662 | 1662 | def listkeys(remote, namespace): |
|
1663 | 1663 | with remote.commandexecutor() as e: |
|
1664 | 1664 | return e.callcommand(b'listkeys', {b'namespace': namespace}).result() |
|
1665 | 1665 | |
|
1666 | 1666 | |
|
1667 | 1667 | def _fullpullbundle2(repo, pullop): |
|
1668 | 1668 | # The server may send a partial reply, i.e. when inlining |
|
1669 | 1669 | # pre-computed bundles. In that case, update the common |
|
1670 | 1670 | # set based on the results and pull another bundle. |
|
1671 | 1671 | # |
|
1672 | 1672 | # There are two indicators that the process is finished: |
|
1673 | 1673 | # - no changeset has been added, or |
|
1674 | 1674 | # - all remote heads are known locally. |
|
1675 | 1675 | # The head check must use the unfiltered view as obsoletion |
|
1676 | 1676 | # markers can hide heads. |
|
1677 | 1677 | unfi = repo.unfiltered() |
|
1678 | 1678 | unficl = unfi.changelog |
|
1679 | 1679 | |
|
1680 | 1680 | def headsofdiff(h1, h2): |
|
1681 | 1681 | """Returns heads(h1 % h2)""" |
|
1682 | 1682 | res = unfi.set(b'heads(%ln %% %ln)', h1, h2) |
|
1683 | 1683 | return {ctx.node() for ctx in res} |
|
1684 | 1684 | |
|
1685 | 1685 | def headsofunion(h1, h2): |
|
1686 | 1686 | """Returns heads((h1 + h2) - null)""" |
|
1687 | 1687 | res = unfi.set(b'heads((%ln + %ln - null))', h1, h2) |
|
1688 | 1688 | return {ctx.node() for ctx in res} |
|
1689 | 1689 | |
|
1690 | 1690 | while True: |
|
1691 | 1691 | old_heads = unficl.heads() |
|
1692 | 1692 | clstart = len(unficl) |
|
1693 | 1693 | _pullbundle2(pullop) |
|
1694 | 1694 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
1695 | 1695 | # XXX narrow clones filter the heads on the server side during |
|
1696 | 1696 | # XXX getbundle and result in partial replies as well. |
|
1697 | 1697 | # XXX Disable pull bundles in this case as band aid to avoid |
|
1698 | 1698 | # XXX extra round trips. |
|
1699 | 1699 | break |
|
1700 | 1700 | if clstart == len(unficl): |
|
1701 | 1701 | break |
|
1702 | 1702 | if all(unficl.hasnode(n) for n in pullop.rheads): |
|
1703 | 1703 | break |
|
1704 | 1704 | new_heads = headsofdiff(unficl.heads(), old_heads) |
|
1705 | 1705 | pullop.common = headsofunion(new_heads, pullop.common) |
|
1706 | 1706 | pullop.rheads = set(pullop.rheads) - pullop.common |
|
1707 | 1707 | |
|
1708 | 1708 | |
|
1709 | 1709 | def add_confirm_callback(repo, pullop): |
|
1710 | 1710 | """adds a finalize callback to the transaction which can be used to show

1711 | 1711 | stats to the user and confirm the pull before committing the transaction"""
|
1712 | 1712 | |
|
1713 | 1713 | tr = pullop.trmanager.transaction() |
|
1714 | 1714 | scmutil.registersummarycallback( |
|
1715 | 1715 | repo, tr, txnname=b'pull', as_validator=True |
|
1716 | 1716 | ) |
|
1717 | 1717 | reporef = weakref.ref(repo.unfiltered()) |
|
1718 | 1718 | |
|
1719 | 1719 | def prompt(tr): |
|
1720 | 1720 | repo = reporef() |
|
1721 | 1721 | cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No') |
|
1722 | 1722 | if repo.ui.promptchoice(cm): |
|
1723 | 1723 | raise error.Abort(_(b"user aborted"))
|
1724 | 1724 | |
|
1725 | 1725 | tr.addvalidator(b'900-pull-prompt', prompt) |
|
1726 | 1726 | |
|
1727 | 1727 | |
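# Editor's note: the prompt installed above fires either when pull() is
# called with confirm=True or when the repository opts in through its
# configuration (and HGPLAIN is not in effect), e.g. in an hgrc:
#
#   [pull]
#   confirm = true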
|
1728 | 1728 | def pull( |
|
1729 | 1729 | repo, |
|
1730 | 1730 | remote, |
|
1731 | 1731 | heads=None, |
|
1732 | 1732 | force=False, |
|
1733 | 1733 | bookmarks=(), |
|
1734 | 1734 | opargs=None, |
|
1735 | 1735 | streamclonerequested=None, |
|
1736 | 1736 | includepats=None, |
|
1737 | 1737 | excludepats=None, |
|
1738 | 1738 | depth=None, |
|
1739 | 1739 | confirm=None, |
|
1740 | 1740 | ): |
|
1741 | 1741 | """Fetch repository data from a remote. |
|
1742 | 1742 | |
|
1743 | 1743 | This is the main function used to retrieve data from a remote repository. |
|
1744 | 1744 | |
|
1745 | 1745 | ``repo`` is the local repository to clone into. |
|
1746 | 1746 | ``remote`` is a peer instance. |
|
1747 | 1747 | ``heads`` is an iterable of revisions we want to pull. ``None`` (the |
|
1748 | 1748 | default) means to pull everything from the remote. |
|
1749 | 1749 | ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By |
|
1750 | 1750 | default, all remote bookmarks are pulled. |
|
1751 | 1751 | ``opargs`` are additional keyword arguments to pass to ``pulloperation`` |
|
1752 | 1752 | initialization. |
|
1753 | 1753 | ``streamclonerequested`` is a boolean indicating whether a "streaming |
|
1754 | 1754 | clone" is requested. A "streaming clone" is essentially a raw file copy |
|
1755 | 1755 | of revlogs from the server. This only works when the local repository is |
|
1756 | 1756 | empty. The default value of ``None`` means to respect the server |
|
1757 | 1757 | configuration for preferring stream clones. |
|
1758 | 1758 | ``includepats`` and ``excludepats`` define explicit file patterns to |
|
1759 | 1759 | include and exclude in storage, respectively. If not defined, narrow |
|
1760 | 1760 | patterns from the repo instance are used, if available. |
|
1761 | 1761 | ``depth`` is an integer indicating the DAG depth of history we're |
|
1762 | 1762 | interested in. If defined, for each revision specified in ``heads``, we |
|
1763 | 1763 | will fetch up to this many of its ancestors and data associated with them. |
|
1764 | 1764 | ``confirm`` is a boolean indicating whether the pull should be confirmed |
|
1765 | 1765 | before committing the transaction. This overrides HGPLAIN. |
|
1766 | 1766 | |
|
1767 | 1767 | Returns the ``pulloperation`` created for this pull. |
|
1768 | 1768 | """ |
|
1769 | 1769 | if opargs is None: |
|
1770 | 1770 | opargs = {} |
|
1771 | 1771 | |
|
1772 | 1772 | # We allow the narrow patterns to be passed in explicitly to provide more |
|
1773 | 1773 | # flexibility for API consumers. |
|
1774 | 1774 | if includepats or excludepats: |
|
1775 | 1775 | includepats = includepats or set() |
|
1776 | 1776 | excludepats = excludepats or set() |
|
1777 | 1777 | else: |
|
1778 | 1778 | includepats, excludepats = repo.narrowpats |
|
1779 | 1779 | |
|
1780 | 1780 | narrowspec.validatepatterns(includepats) |
|
1781 | 1781 | narrowspec.validatepatterns(excludepats) |
|
1782 | 1782 | |
|
1783 | 1783 | pullop = pulloperation( |
|
1784 | 1784 | repo, |
|
1785 | 1785 | remote, |
|
1786 | 1786 | heads, |
|
1787 | 1787 | force, |
|
1788 | 1788 | bookmarks=bookmarks, |
|
1789 | 1789 | streamclonerequested=streamclonerequested, |
|
1790 | 1790 | includepats=includepats, |
|
1791 | 1791 | excludepats=excludepats, |
|
1792 | 1792 | depth=depth, |
|
1793 | 1793 | **pycompat.strkwargs(opargs) |
|
1794 | 1794 | ) |
|
1795 | 1795 | |
|
1796 | 1796 | peerlocal = pullop.remote.local() |
|
1797 | 1797 | if peerlocal: |
|
1798 | 1798 | missing = set(peerlocal.requirements) - pullop.repo.supported |
|
1799 | 1799 | if missing: |
|
1800 | 1800 | msg = _( |
|
1801 | 1801 | b"required features are not" |
|
1802 | 1802 | b" supported in the destination:" |
|
1803 | 1803 | b" %s" |
|
1804 | 1804 | ) % (b', '.join(sorted(missing))) |
|
1805 | 1805 | raise error.Abort(msg) |
|
1806 | 1806 | |
|
1807 | 1807 | pullop.trmanager = transactionmanager(repo, b'pull', remote.url()) |
|
1808 | 1808 | wlock = util.nullcontextmanager() |
|
1809 | 1809 | if not bookmod.bookmarksinstore(repo): |
|
1810 | 1810 | wlock = repo.wlock() |
|
1811 | 1811 | with wlock, repo.lock(), pullop.trmanager: |
|
1812 | 1812 | if confirm or ( |
|
1813 | 1813 | repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain() |
|
1814 | 1814 | ): |
|
1815 | 1815 | add_confirm_callback(repo, pullop) |
|
1816 | 1816 | |
|
1817 | 1817 | # Use the modern wire protocol, if available. |
|
1818 | 1818 | if remote.capable(b'command-changesetdata'): |
|
1819 | 1819 | exchangev2.pull(pullop) |
|
1820 | 1820 | else: |
|
1821 | 1821 | # This should ideally be in _pullbundle2(). However, it needs to run |
|
1822 | 1822 | # before discovery to avoid extra work. |
|
1823 | 1823 | _maybeapplyclonebundle(pullop) |
|
1824 | 1824 | streamclone.maybeperformlegacystreamclone(pullop) |
|
1825 | 1825 | _pulldiscovery(pullop) |
|
1826 | 1826 | if pullop.canusebundle2: |
|
1827 | 1827 | _fullpullbundle2(repo, pullop) |
|
1828 | 1828 | _pullchangeset(pullop) |
|
1829 | 1829 | _pullphase(pullop) |
|
1830 | 1830 | _pullbookmarks(pullop) |
|
1831 | 1831 | _pullobsolete(pullop) |
|
1832 | 1832 | |
|
1833 | 1833 | # storing remotenames |
|
1834 | 1834 | if repo.ui.configbool(b'experimental', b'remotenames'): |
|
1835 | 1835 | logexchange.pullremotenames(repo, remote) |
|
1836 | 1836 | |
|
1837 | 1837 | return pullop |
|
1838 | 1838 | |
|
1839 | 1839 | |
|
1840 | 1840 | # list of steps to perform discovery before pull |
|
1841 | 1841 | pulldiscoveryorder = [] |
|
1842 | 1842 | |
|
1843 | 1843 | # Mapping between step name and function |
|
1844 | 1844 | # |
|
1845 | 1845 | # This exists to help extensions wrap steps if necessary |
|
1846 | 1846 | pulldiscoverymapping = {} |
|
1847 | 1847 | |
|
1848 | 1848 | |
|
1849 | 1849 | def pulldiscovery(stepname): |
|
1850 | 1850 | """decorator for function performing discovery before pull |
|
1851 | 1851 | |
|
1852 | 1852 | The function is added to the step -> function mapping and appended to the |
|
1853 | 1853 | list of steps. Beware that decorated functions will be added in order (this
|
1854 | 1854 | may matter). |
|
1855 | 1855 | |
|
1856 | 1856 | You can only use this decorator for a new step; if you want to wrap a step
|
1857 | 1857 | from an extension, change the pulldiscovery dictionary directly.""" |
|
1858 | 1858 | |
|
1859 | 1859 | def dec(func): |
|
1860 | 1860 | assert stepname not in pulldiscoverymapping |
|
1861 | 1861 | pulldiscoverymapping[stepname] = func |
|
1862 | 1862 | pulldiscoveryorder.append(stepname) |
|
1863 | 1863 | return func |
|
1864 | 1864 | |
|
1865 | 1865 | return dec |
|
1866 | 1866 | |
|
1867 | 1867 | |
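# Editor's note: an illustrative sketch of registering a new discovery
# step with the decorator above. The step name and body are hypothetical;
# registration order defines execution order.
#
#   @pulldiscovery(b'my-extra-step')
#   def _pulldiscoveryextra(pullop):
#       pullop.repo.ui.debug(b'running extra discovery\n')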
|
1868 | 1868 | def _pulldiscovery(pullop): |
|
1869 | 1869 | """Run all discovery steps""" |
|
1870 | 1870 | for stepname in pulldiscoveryorder: |
|
1871 | 1871 | step = pulldiscoverymapping[stepname] |
|
1872 | 1872 | step(pullop) |
|
1873 | 1873 | |
|
1874 | 1874 | |
|
1875 | 1875 | @pulldiscovery(b'b1:bookmarks') |
|
1876 | 1876 | def _pullbookmarkbundle1(pullop): |
|
1877 | 1877 | """fetch bookmark data in bundle1 case |
|
1878 | 1878 | |
|
1879 | 1879 | If not using bundle2, we have to fetch bookmarks before changeset |
|
1880 | 1880 | discovery to reduce the chance and impact of race conditions.""" |
|
1881 | 1881 | if pullop.remotebookmarks is not None: |
|
1882 | 1882 | return |
|
1883 | 1883 | if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps: |
|
1884 | 1884 | # all known bundle2 servers now support listkeys, but let's be nice to

1885 | 1885 | # new implementations.
|
1886 | 1886 | return |
|
1887 | 1887 | books = listkeys(pullop.remote, b'bookmarks') |
|
1888 | 1888 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(books) |
|
1889 | 1889 | |
|
1890 | 1890 | |
|
1891 | 1891 | @pulldiscovery(b'changegroup') |
|
1892 | 1892 | def _pulldiscoverychangegroup(pullop): |
|
1893 | 1893 | """discovery phase for the pull |
|
1894 | 1894 | |
|
1895 | 1895 | Currently handles changeset discovery only; will be changed to handle all

1896 | 1896 | discovery at some point.
|
1897 | 1897 | tmp = discovery.findcommonincoming( |
|
1898 | 1898 | pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force |
|
1899 | 1899 | ) |
|
1900 | 1900 | common, fetch, rheads = tmp |
|
1901 | 1901 | has_node = pullop.repo.unfiltered().changelog.index.has_node |
|
1902 | 1902 | if fetch and rheads: |
|
1903 | 1903 | # If a remote head is filtered locally, put it back in common.

1904 | 1904 | #

1905 | 1905 | # This is a hackish solution to catch most "common but locally

1906 | 1906 | # hidden" situations. We do not perform discovery on the unfiltered

1907 | 1907 | # repository because it ends up doing a pathological number of round

1908 | 1908 | # trips for a huge amount of changesets we do not care about.

1909 | 1909 | #

1910 | 1910 | # If a set of such "common but filtered" changesets exists on the server

1911 | 1911 | # but does not include a remote head, we will not be able to detect it,
|
1912 | 1912 | scommon = set(common) |
|
1913 | 1913 | for n in rheads: |
|
1914 | 1914 | if has_node(n): |
|
1915 | 1915 | if n not in scommon: |
|
1916 | 1916 | common.append(n) |
|
1917 | 1917 | if set(rheads).issubset(set(common)): |
|
1918 | 1918 | fetch = [] |
|
1919 | 1919 | pullop.common = common |
|
1920 | 1920 | pullop.fetch = fetch |
|
1921 | 1921 | pullop.rheads = rheads |
|
1922 | 1922 | |
|
1923 | 1923 | |
|
1924 | 1924 | def _pullbundle2(pullop): |
|
1925 | 1925 | """pull data using bundle2 |
|
1926 | 1926 | |
|
1927 | 1927 | For now, the only supported type of data is changegroup."""
|
1928 | 1928 | kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')} |
|
1929 | 1929 | |
|
1930 | 1930 | # make ui easier to access |
|
1931 | 1931 | ui = pullop.repo.ui |
|
1932 | 1932 | |
|
1933 | 1933 | # At the moment we don't do stream clones over bundle2. If that is |
|
1934 | 1934 | # implemented then here's where the check for that will go. |
|
1935 | 1935 | streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0] |
|
1936 | 1936 | |
|
1937 | 1937 | # declare pull perimeters |
|
1938 | 1938 | kwargs[b'common'] = pullop.common |
|
1939 | 1939 | kwargs[b'heads'] = pullop.heads or pullop.rheads |
|
1940 | 1940 | |
|
1941 | 1941 | # check server supports narrow and then adding includepats and excludepats |
|
1942 | 1942 | servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP) |
|
1943 | 1943 | if servernarrow and pullop.includepats: |
|
1944 | 1944 | kwargs[b'includepats'] = pullop.includepats |
|
1945 | 1945 | if servernarrow and pullop.excludepats: |
|
1946 | 1946 | kwargs[b'excludepats'] = pullop.excludepats |
|
1947 | 1947 | |
|
1948 | 1948 | if streaming: |
|
1949 | 1949 | kwargs[b'cg'] = False |
|
1950 | 1950 | kwargs[b'stream'] = True |
|
1951 | 1951 | pullop.stepsdone.add(b'changegroup') |
|
1952 | 1952 | pullop.stepsdone.add(b'phases') |
|
1953 | 1953 | |
|
1954 | 1954 | else: |
|
1955 | 1955 | # pulling changegroup |
|
1956 | 1956 | pullop.stepsdone.add(b'changegroup') |
|
1957 | 1957 | |
|
1958 | 1958 | kwargs[b'cg'] = pullop.fetch |
|
1959 | 1959 | |
|
1960 | 1960 | legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange') |
|
1961 | 1961 | hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ()) |
|
1962 | 1962 | if not legacyphase and hasbinaryphase: |
|
1963 | 1963 | kwargs[b'phases'] = True |
|
1964 | 1964 | pullop.stepsdone.add(b'phases') |
|
1965 | 1965 | |
|
1966 | 1966 | if b'listkeys' in pullop.remotebundle2caps: |
|
1967 | 1967 | if b'phases' not in pullop.stepsdone: |
|
1968 | 1968 | kwargs[b'listkeys'] = [b'phases'] |
|
1969 | 1969 | |
|
1970 | 1970 | bookmarksrequested = False |
|
1971 | 1971 | legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange') |
|
1972 | 1972 | hasbinarybook = b'bookmarks' in pullop.remotebundle2caps |
|
1973 | 1973 | |
|
1974 | 1974 | if pullop.remotebookmarks is not None: |
|
1975 | 1975 | pullop.stepsdone.add(b'request-bookmarks') |
|
1976 | 1976 | |
|
1977 | 1977 | if ( |
|
1978 | 1978 | b'request-bookmarks' not in pullop.stepsdone |
|
1979 | 1979 | and pullop.remotebookmarks is None |
|
1980 | 1980 | and not legacybookmark |
|
1981 | 1981 | and hasbinarybook |
|
1982 | 1982 | ): |
|
1983 | 1983 | kwargs[b'bookmarks'] = True |
|
1984 | 1984 | bookmarksrequested = True |
|
1985 | 1985 | |
|
1986 | 1986 | if b'listkeys' in pullop.remotebundle2caps: |
|
1987 | 1987 | if b'request-bookmarks' not in pullop.stepsdone: |
|
1988 | 1988 | # make sure to always include bookmark data when migrating
|
1989 | 1989 | # `hg incoming --bundle` to using this function. |
|
1990 | 1990 | pullop.stepsdone.add(b'request-bookmarks') |
|
1991 | 1991 | kwargs.setdefault(b'listkeys', []).append(b'bookmarks') |
|
1992 | 1992 | |
|
1993 | 1993 | # If this is a full pull / clone and the server supports the clone bundles |
|
1994 | 1994 | # feature, tell the server whether we attempted a clone bundle. The |
|
1995 | 1995 | # presence of this flag indicates the client supports clone bundles. This |
|
1996 | 1996 | # will enable the server to treat clients that support clone bundles |
|
1997 | 1997 | # differently from those that don't. |
|
1998 | 1998 | if ( |
|
1999 | 1999 | pullop.remote.capable(b'clonebundles') |
|
2000 | 2000 | and pullop.heads is None |
|
2001 | 2001 | and list(pullop.common) == [nullid] |
|
2002 | 2002 | ): |
|
2003 | 2003 | kwargs[b'cbattempted'] = pullop.clonebundleattempted |
|
2004 | 2004 | |
|
2005 | 2005 | if streaming: |
|
2006 | 2006 | pullop.repo.ui.status(_(b'streaming all changes\n')) |
|
2007 | 2007 | elif not pullop.fetch: |
|
2008 | 2008 | pullop.repo.ui.status(_(b"no changes found\n")) |
|
2009 | 2009 | pullop.cgresult = 0 |
|
2010 | 2010 | else: |
|
2011 | 2011 | if pullop.heads is None and list(pullop.common) == [nullid]: |
|
2012 | 2012 | pullop.repo.ui.status(_(b"requesting all changes\n")) |
|
2013 | 2013 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): |
|
2014 | 2014 | remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps) |
|
2015 | 2015 | if obsolete.commonversion(remoteversions) is not None: |
|
2016 | 2016 | kwargs[b'obsmarkers'] = True |
|
2017 | 2017 | pullop.stepsdone.add(b'obsmarkers') |
|
2018 | 2018 | _pullbundle2extraprepare(pullop, kwargs) |
|
2019 | 2019 | |
|
2020 | 2020 | with pullop.remote.commandexecutor() as e: |
|
2021 | 2021 | args = dict(kwargs) |
|
2022 | 2022 | args[b'source'] = b'pull' |
|
2023 | 2023 | bundle = e.callcommand(b'getbundle', args).result() |
|
2024 | 2024 | |
|
2025 | 2025 | try: |
|
2026 | 2026 | op = bundle2.bundleoperation( |
|
2027 | 2027 | pullop.repo, pullop.gettransaction, source=b'pull' |
|
2028 | 2028 | ) |
|
2029 | 2029 | op.modes[b'bookmarks'] = b'records' |
|
2030 | 2030 | bundle2.processbundle(pullop.repo, bundle, op=op) |
|
2031 | 2031 | except bundle2.AbortFromPart as exc: |
|
2032 | 2032 | pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc) |
|
2033 | 2033 | raise error.Abort(_(b'pull failed on remote'), hint=exc.hint) |
|
2034 | 2034 | except error.BundleValueError as exc: |
|
2035 | 2035 | raise error.Abort(_(b'missing support for %s') % exc) |
|
2036 | 2036 | |
|
2037 | 2037 | if pullop.fetch: |
|
2038 | 2038 | pullop.cgresult = bundle2.combinechangegroupresults(op) |
|
2039 | 2039 | |
|
2040 | 2040 | # processing phases change |
|
2041 | 2041 | for namespace, value in op.records[b'listkeys']: |
|
2042 | 2042 | if namespace == b'phases': |
|
2043 | 2043 | _pullapplyphases(pullop, value) |
|
2044 | 2044 | |
|
2045 | 2045 | # processing bookmark update |
|
2046 | 2046 | if bookmarksrequested: |
|
2047 | 2047 | books = {} |
|
2048 | 2048 | for record in op.records[b'bookmarks']: |
|
2049 | 2049 | books[record[b'bookmark']] = record[b"node"] |
|
2050 | 2050 | pullop.remotebookmarks = books |
|
2051 | 2051 | else: |
|
2052 | 2052 | for namespace, value in op.records[b'listkeys']: |
|
2053 | 2053 | if namespace == b'bookmarks': |
|
2054 | 2054 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(value) |
|
2055 | 2055 | |
|
2056 | 2056 | # bookmark data were either already there or pulled in the bundle |
|
2057 | 2057 | if pullop.remotebookmarks is not None: |
|
2058 | 2058 | _pullbookmarks(pullop) |
|
2059 | 2059 | |
|
2060 | 2060 | |
|
2061 | 2061 | def _pullbundle2extraprepare(pullop, kwargs): |
|
2062 | 2062 | """hook function so that extensions can extend the getbundle call""" |
|
2063 | 2063 | |
|
2064 | 2064 | |
|
2065 | 2065 | def _pullchangeset(pullop): |
|
2066 | 2066 | """pull changeset from unbundle into the local repo""" |
|
2067 | 2067 | # We delay opening the transaction as late as possible so we

2068 | 2068 | # don't open a transaction for nothing and don't break future useful

2069 | 2069 | # rollback calls
|
2070 | 2070 | if b'changegroup' in pullop.stepsdone: |
|
2071 | 2071 | return |
|
2072 | 2072 | pullop.stepsdone.add(b'changegroup') |
|
2073 | 2073 | if not pullop.fetch: |
|
2074 | 2074 | pullop.repo.ui.status(_(b"no changes found\n")) |
|
2075 | 2075 | pullop.cgresult = 0 |
|
2076 | 2076 | return |
|
2077 | 2077 | tr = pullop.gettransaction() |
|
2078 | 2078 | if pullop.heads is None and list(pullop.common) == [nullid]: |
|
2079 | 2079 | pullop.repo.ui.status(_(b"requesting all changes\n")) |
|
2080 | 2080 | elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'): |
|
2081 | 2081 | # issue1320, avoid a race if remote changed after discovery |
|
2082 | 2082 | pullop.heads = pullop.rheads |
|
2083 | 2083 | |
|
2084 | 2084 | if pullop.remote.capable(b'getbundle'): |
|
2085 | 2085 | # TODO: get bundlecaps from remote |
|
2086 | 2086 | cg = pullop.remote.getbundle( |
|
2087 | 2087 | b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads |
|
2088 | 2088 | ) |
|
2089 | 2089 | elif pullop.heads is None: |
|
2090 | 2090 | with pullop.remote.commandexecutor() as e: |
|
2091 | 2091 | cg = e.callcommand( |
|
2092 | 2092 | b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',} |
|
2093 | 2093 | ).result() |
|
2094 | 2094 | |
|
2095 | 2095 | elif not pullop.remote.capable(b'changegroupsubset'): |
|
2096 | 2096 | raise error.Abort( |
|
2097 | 2097 | _( |
|
2098 | 2098 | b"partial pull cannot be done because " |
|
2099 | 2099 | b"other repository doesn't support " |
|
2100 | 2100 | b"changegroupsubset." |
|
2101 | 2101 | ) |
|
2102 | 2102 | ) |
|
2103 | 2103 | else: |
|
2104 | 2104 | with pullop.remote.commandexecutor() as e: |
|
2105 | 2105 | cg = e.callcommand( |
|
2106 | 2106 | b'changegroupsubset', |
|
2107 | 2107 | { |
|
2108 | 2108 | b'bases': pullop.fetch, |
|
2109 | 2109 | b'heads': pullop.heads, |
|
2110 | 2110 | b'source': b'pull', |
|
2111 | 2111 | }, |
|
2112 | 2112 | ).result() |
|
2113 | 2113 | |
|
2114 | 2114 | bundleop = bundle2.applybundle( |
|
2115 | 2115 | pullop.repo, cg, tr, b'pull', pullop.remote.url() |
|
2116 | 2116 | ) |
|
2117 | 2117 | pullop.cgresult = bundle2.combinechangegroupresults(bundleop) |
|
2118 | 2118 | |
|
2119 | 2119 | |
|
2120 | 2120 | def _pullphase(pullop): |
|
2121 | 2121 | # Get remote phases data from remote |
|
2122 | 2122 | if b'phases' in pullop.stepsdone: |
|
2123 | 2123 | return |
|
2124 | 2124 | remotephases = listkeys(pullop.remote, b'phases') |
|
2125 | 2125 | _pullapplyphases(pullop, remotephases) |
|
2126 | 2126 | |
|
2127 | 2127 | |
|
2128 | 2128 | def _pullapplyphases(pullop, remotephases): |
|
2129 | 2129 | """apply phase movement from observed remote state""" |
|
2130 | 2130 | if b'phases' in pullop.stepsdone: |
|
2131 | 2131 | return |
|
2132 | 2132 | pullop.stepsdone.add(b'phases') |
|
2133 | 2133 | publishing = bool(remotephases.get(b'publishing', False)) |
|
2134 | 2134 | if remotephases and not publishing: |
|
2135 | 2135 | # remote is new and non-publishing |
|
2136 | 2136 | pheads, _dr = phases.analyzeremotephases( |
|
2137 | 2137 | pullop.repo, pullop.pulledsubset, remotephases |
|
2138 | 2138 | ) |
|
2139 | 2139 | dheads = pullop.pulledsubset |
|
2140 | 2140 | else: |
|
2141 | 2141 | # Remote is old or publishing; all common changesets

2142 | 2142 | # should be seen as public
|
2143 | 2143 | pheads = pullop.pulledsubset |
|
2144 | 2144 | dheads = [] |
|
2145 | 2145 | unfi = pullop.repo.unfiltered() |
|
2146 | 2146 | phase = unfi._phasecache.phase |
|
2147 | 2147 | rev = unfi.changelog.index.get_rev |
|
2148 | 2148 | public = phases.public |
|
2149 | 2149 | draft = phases.draft |
|
2150 | 2150 | |
|
2151 | 2151 | # exclude changesets already public locally and update the others |
|
2152 | 2152 | pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public] |
|
2153 | 2153 | if pheads: |
|
2154 | 2154 | tr = pullop.gettransaction() |
|
2155 | 2155 | phases.advanceboundary(pullop.repo, tr, public, pheads) |
|
2156 | 2156 | |
|
2157 | 2157 | # exclude changesets already draft locally and update the others |
|
2158 | 2158 | dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft] |
|
2159 | 2159 | if dheads: |
|
2160 | 2160 | tr = pullop.gettransaction() |
|
2161 | 2161 | phases.advanceboundary(pullop.repo, tr, draft, dheads) |
|
2162 | 2162 | |
|
2163 | 2163 | |
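# Editor's note: phases.advanceboundary() only ever moves changesets to a
# lower-numbered (more public) phase, which is why _pullapplyphases()
# above first filters out nodes whose local phase is already at or below
# the target. Mercurial's phase ordering is:
#
#   phases.public < phases.draft < phases.secret   # 0 < 1 < 2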
|
2164 | 2164 | def _pullbookmarks(pullop): |
|
2165 | 2165 | """process the remote bookmark information to update the local one""" |
|
2166 | 2166 | if b'bookmarks' in pullop.stepsdone: |
|
2167 | 2167 | return |
|
2168 | 2168 | pullop.stepsdone.add(b'bookmarks') |
|
2169 | 2169 | repo = pullop.repo |
|
2170 | 2170 | remotebookmarks = pullop.remotebookmarks |
|
2171 | 2171 | bookmod.updatefromremote( |
|
2172 | 2172 | repo.ui, |
|
2173 | 2173 | repo, |
|
2174 | 2174 | remotebookmarks, |
|
2175 | 2175 | pullop.remote.url(), |
|
2176 | 2176 | pullop.gettransaction, |
|
2177 | 2177 | explicit=pullop.explicitbookmarks, |
|
2178 | 2178 | ) |
|
2179 | 2179 | |
|
2180 | 2180 | |
|
2181 | 2181 | def _pullobsolete(pullop): |
|
2182 | 2182 | """utility function to pull obsolete markers from a remote |
|
2183 | 2183 | |
|
2184 | 2184 | The `gettransaction` is function that return the pull transaction, creating |
|
2185 | 2185 | one if necessary. We return the transaction to inform the calling code that |
|
2186 | 2186 | a new transaction have been created (when applicable). |
|
2187 | 2187 | |
|
2188 | 2188 | Exists mostly to allow overriding for experimentation purpose""" |
|
2189 | 2189 | if b'obsmarkers' in pullop.stepsdone: |
|
2190 | 2190 | return |
|
2191 | 2191 | pullop.stepsdone.add(b'obsmarkers') |
|
2192 | 2192 | tr = None |
|
2193 | 2193 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): |
|
2194 | 2194 | pullop.repo.ui.debug(b'fetching remote obsolete markers\n') |
|
2195 | 2195 | remoteobs = listkeys(pullop.remote, b'obsolete') |
|
2196 | 2196 | if b'dump0' in remoteobs: |
|
2197 | 2197 | tr = pullop.gettransaction() |
|
2198 | 2198 | markers = [] |
|
2199 | 2199 | for key in sorted(remoteobs, reverse=True): |
|
2200 | 2200 | if key.startswith(b'dump'): |
|
2201 | 2201 | data = util.b85decode(remoteobs[key]) |
|
2202 | 2202 | version, newmarks = obsolete._readmarkers(data) |
|
2203 | 2203 | markers += newmarks |
|
2204 | 2204 | if markers: |
|
2205 | 2205 | pullop.repo.obsstore.add(tr, markers) |
|
2206 | 2206 | pullop.repo.invalidatevolatilesets() |
|
2207 | 2207 | return tr |
|
2208 | 2208 | |
|
2209 | 2209 | |
|
2210 | 2210 | def applynarrowacl(repo, kwargs): |
|
2211 | 2211 | """Apply narrow fetch access control. |
|
2212 | 2212 | |
|
2213 | 2213 | This massages the named arguments for getbundle wire protocol commands |
|
2214 | 2214 | so requested data is filtered through access control rules. |
|
2215 | 2215 | """ |
|
2216 | 2216 | ui = repo.ui |
|
2217 | 2217 | # TODO this assumes existence of HTTP and is a layering violation. |
|
2218 | 2218 | username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username()) |
|
2219 | 2219 | user_includes = ui.configlist( |
|
2220 | 2220 | _NARROWACL_SECTION, |
|
2221 | 2221 | username + b'.includes', |
|
2222 | 2222 | ui.configlist(_NARROWACL_SECTION, b'default.includes'), |
|
2223 | 2223 | ) |
|
2224 | 2224 | user_excludes = ui.configlist( |
|
2225 | 2225 | _NARROWACL_SECTION, |
|
2226 | 2226 | username + b'.excludes', |
|
2227 | 2227 | ui.configlist(_NARROWACL_SECTION, b'default.excludes'), |
|
2228 | 2228 | ) |
|
2229 | 2229 | if not user_includes: |
|
2230 | 2230 | raise error.Abort( |
|
2231 | 2231 | _(b"%s configuration for user %s is empty") |
|
2232 | 2232 | % (_NARROWACL_SECTION, username) |
|
2233 | 2233 | ) |
|
2234 | 2234 | |
|
2235 | 2235 | user_includes = [ |
|
2236 | 2236 | b'path:.' if p == b'*' else b'path:' + p for p in user_includes |
|
2237 | 2237 | ] |
|
2238 | 2238 | user_excludes = [ |
|
2239 | 2239 | b'path:.' if p == b'*' else b'path:' + p for p in user_excludes |
|
2240 | 2240 | ] |
|
2241 | 2241 | |
|
2242 | 2242 | req_includes = set(kwargs.get('includepats', [])) |
|
2243 | 2243 | req_excludes = set(kwargs.get('excludepats', [])) |
|
2244 | 2244 | |
|
2245 | 2245 | req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns( |
|
2246 | 2246 | req_includes, req_excludes, user_includes, user_excludes |
|
2247 | 2247 | ) |
|
2248 | 2248 | |
|
2249 | 2249 | if invalid_includes: |
|
2250 | 2250 | raise error.Abort( |
|
2251 | 2251 | _(b"The following includes are not accessible for %s: %s") |
|
2252 | 2252 | % (username, stringutil.pprint(invalid_includes)) |
|
2253 | 2253 | ) |
|
2254 | 2254 | |
|
2255 | 2255 | new_args = {} |
|
2256 | 2256 | new_args.update(kwargs) |
|
2257 | 2257 | new_args['narrow'] = True |
|
2258 | 2258 | new_args['narrow_acl'] = True |
|
2259 | 2259 | new_args['includepats'] = req_includes |
|
2260 | 2260 | if req_excludes: |
|
2261 | 2261 | new_args['excludepats'] = req_excludes |
|
2262 | 2262 | |
|
2263 | 2263 | return new_args |
|
2264 | 2264 | |
|
2265 | 2265 | |
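# Editor's note: an illustrative sketch of the server-side configuration
# consumed by applynarrowacl() above, assuming _NARROWACL_SECTION resolves
# to ``narrowacl``. Per-user entries fall back to the ``default`` ones and
# a bare ``*`` means the whole repository; user and path names here are
# hypothetical.
#
#   [narrowacl]
#   default.includes = *
#   alice.includes = src/lib, docs
#   alice.excludes = src/lib/secret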
|
2266 | 2266 | def _computeellipsis(repo, common, heads, known, match, depth=None): |
|
2267 | 2267 | """Compute the shape of a narrowed DAG. |
|
2268 | 2268 | |
|
2269 | 2269 | Args: |
|
2270 | 2270 | repo: The repository we're transferring. |
|
2271 | 2271 | common: The roots of the DAG range we're transferring. |
|
2272 | 2272 | May be just [nullid], which means all ancestors of heads. |
|
2273 | 2273 | heads: The heads of the DAG range we're transferring. |
|
2274 | 2274 | match: The narrowmatcher that allows us to identify relevant changes. |
|
2275 | 2275 | depth: If not None, only consider nodes to be full nodes if they are at |
|
2276 | 2276 | most depth changesets away from one of heads. |
|
2277 | 2277 | |
|
2278 | 2278 | Returns: |
|
2279 | 2279 | A tuple of (visitnodes, relevant_nodes, ellipsisroots) where: |
|
2280 | 2280 | |
|
2281 | 2281 | visitnodes: The list of nodes (either full or ellipsis) which |
|
2282 | 2282 | need to be sent to the client. |
|
2283 | 2283 | relevant_nodes: The set of changelog nodes which change a file inside |
|
2284 | 2284 | the narrowspec. The client needs these as non-ellipsis nodes. |
|
2285 | 2285 | ellipsisroots: A dict of {rev: parents} that is used in |
|
2286 | 2286 | narrowchangegroup to produce ellipsis nodes with the |
|
2287 | 2287 | correct parents. |
|
2288 | 2288 | """ |
|
2289 | 2289 | cl = repo.changelog |
|
2290 | 2290 | mfl = repo.manifestlog |
|
2291 | 2291 | |
|
2292 | 2292 | clrev = cl.rev |
|
2293 | 2293 | |
|
2294 | 2294 | commonrevs = {clrev(n) for n in common} | {nullrev} |
|
2295 | 2295 | headsrevs = {clrev(n) for n in heads} |
|
2296 | 2296 | |
|
2297 | 2297 | if depth: |
|
2298 | 2298 | revdepth = {h: 0 for h in headsrevs} |
|
2299 | 2299 | |
|
2300 | 2300 | ellipsisheads = collections.defaultdict(set) |
|
2301 | 2301 | ellipsisroots = collections.defaultdict(set) |
|
2302 | 2302 | |
|
2303 | 2303 | def addroot(head, curchange): |
|
2304 | 2304 | """Add a root to an ellipsis head, splitting heads with 3 roots.""" |
|
2305 | 2305 | ellipsisroots[head].add(curchange) |
|
2306 | 2306 | # Recursively split ellipsis heads with 3 roots by finding the |
|
2307 | 2307 | # roots' youngest common descendant which is an elided merge commit. |
|
2308 | 2308 | # That descendant takes 2 of the 3 roots as its own, and becomes a |
|
2309 | 2309 | # root of the head. |
|
2310 | 2310 | while len(ellipsisroots[head]) > 2: |
|
2311 | 2311 | child, roots = splithead(head) |
|
2312 | 2312 | splitroots(head, child, roots) |
|
2313 | 2313 | head = child # Recurse in case we just added a 3rd root |
|
2314 | 2314 | |
|
2315 | 2315 | def splitroots(head, child, roots): |
|
2316 | 2316 | ellipsisroots[head].difference_update(roots) |
|
2317 | 2317 | ellipsisroots[head].add(child) |
|
2318 | 2318 | ellipsisroots[child].update(roots) |
|
2319 | 2319 | ellipsisroots[child].discard(child) |
|
2320 | 2320 | |
|
2321 | 2321 | def splithead(head): |
|
2322 | 2322 | r1, r2, r3 = sorted(ellipsisroots[head]) |
|
2323 | 2323 | for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)): |
|
2324 | 2324 | mid = repo.revs( |
|
2325 | 2325 | b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head |
|
2326 | 2326 | ) |
|
2327 | 2327 | for j in mid: |
|
2328 | 2328 | if j == nr2: |
|
2329 | 2329 | return nr2, (nr1, nr2) |
|
2330 | 2330 | if j not in ellipsisroots or len(ellipsisroots[j]) < 2: |
|
2331 | 2331 | return j, (nr1, nr2) |
|
2332 | 2332 | raise error.Abort( |
|
2333 | 2333 | _( |
|
2334 | 2334 | b'Failed to split up ellipsis node! head: %d, ' |
|
2335 | 2335 | b'roots: %d %d %d' |
|
2336 | 2336 | ) |
|
2337 | 2337 | % (head, r1, r2, r3) |
|
2338 | 2338 | ) |
|
2339 | 2339 | |
|
2340 | 2340 | missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs)) |
|
2341 | 2341 | visit = reversed(missing) |
|
2342 | 2342 | relevant_nodes = set() |
|
2343 | 2343 | visitnodes = [cl.node(m) for m in missing] |
|
2344 | 2344 | required = set(headsrevs) | known |
|
2345 | 2345 | for rev in visit: |
|
2346 | 2346 | clrev = cl.changelogrevision(rev) |
|
2347 | 2347 | ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev] |
|
2348 | 2348 | if depth is not None: |
|
2349 | 2349 | curdepth = revdepth[rev] |
|
2350 | 2350 | for p in ps: |
|
2351 | 2351 | revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1)) |
|
2352 | 2352 | needed = False |
|
2353 | 2353 | shallow_enough = depth is None or revdepth[rev] <= depth |
|
2354 | 2354 | if shallow_enough: |
|
2355 | 2355 | curmf = mfl[clrev.manifest].read() |
|
2356 | 2356 | if ps: |
|
2357 | 2357 | # We choose to not trust the changed files list in |
|
2358 | 2358 | # changesets because it's not always correct. TODO: could |
|
2359 | 2359 | # we trust it for the non-merge case? |
|
2360 | 2360 | p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read() |
|
2361 | 2361 | needed = bool(curmf.diff(p1mf, match)) |
|
2362 | 2362 | if not needed and len(ps) > 1: |
|
2363 | 2363 | # For merge changes, the list of changed files is not |
|
2364 | 2364 | # helpful, since we need to emit the merge if a file |
|
2365 | 2365 | # in the narrow spec has changed on either side of the |
|
2366 | 2366 | # merge. As a result, we do a manifest diff to check. |
|
2367 | 2367 | p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read() |
|
2368 | 2368 | needed = bool(curmf.diff(p2mf, match)) |
|
2369 | 2369 | else: |
|
2370 | 2370 | # For a root node, we need to include the node if any |
|
2371 | 2371 | # files in the node match the narrowspec. |
|
2372 | 2372 | needed = any(curmf.walk(match)) |
|
2373 | 2373 | |
|
2374 | 2374 | if needed: |
|
2375 | 2375 | for head in ellipsisheads[rev]: |
|
2376 | 2376 | addroot(head, rev) |
|
2377 | 2377 | for p in ps: |
|
2378 | 2378 | required.add(p) |
|
2379 | 2379 | relevant_nodes.add(cl.node(rev)) |
|
2380 | 2380 | else: |
|
2381 | 2381 | if not ps: |
|
2382 | 2382 | ps = [nullrev] |
|
2383 | 2383 | if rev in required: |
|
2384 | 2384 | for head in ellipsisheads[rev]: |
|
2385 | 2385 | addroot(head, rev) |
|
2386 | 2386 | for p in ps: |
|
2387 | 2387 | ellipsisheads[p].add(rev) |
|
2388 | 2388 | else: |
|
2389 | 2389 | for p in ps: |
|
2390 | 2390 | ellipsisheads[p] |= ellipsisheads[rev] |
|
2391 | 2391 | |
|
2392 | 2392 | # add common changesets as roots of their reachable ellipsis heads |
|
2393 | 2393 | for c in commonrevs: |
|
2394 | 2394 | for head in ellipsisheads[c]: |
|
2395 | 2395 | addroot(head, c) |
|
2396 | 2396 | return visitnodes, relevant_nodes, ellipsisroots |
|
2397 | 2397 | |
|
2398 | 2398 | |
|
2399 | 2399 | def caps20to10(repo, role): |
|
2400 | 2400 | """return a set with appropriate options to use bundle20 during getbundle""" |
|
2401 | 2401 | caps = {b'HG20'} |
|
2402 | 2402 | capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role)) |
|
2403 | 2403 | caps.add(b'bundle2=' + urlreq.quote(capsblob)) |
|
2404 | 2404 | return caps |
|
2405 | 2405 | |
|
2406 | 2406 | |
|
2407 | 2407 | # List of names of steps to perform for a bundle2 for getbundle, order matters. |
|
2408 | 2408 | getbundle2partsorder = [] |
|
2409 | 2409 | |
|
2410 | 2410 | # Mapping between step name and function |
|
2411 | 2411 | # |
|
2412 | 2412 | # This exists to help extensions wrap steps if necessary |
|
2413 | 2413 | getbundle2partsmapping = {} |
|
2414 | 2414 | |
|
2415 | 2415 | |
|
2416 | 2416 | def getbundle2partsgenerator(stepname, idx=None): |
|
2417 | 2417 | """decorator for function generating bundle2 part for getbundle |
|
2418 | 2418 | |
|
2419 | 2419 | The function is added to the step -> function mapping and appended to the |
|
2420 | 2420 | list of steps. Beware that decorated functions will be added in order |
|
2421 | 2421 | (this may matter). |
|
2422 | 2422 | |
|
2423 | 2423 | You can only use this decorator for new steps; if you want to wrap a step

2424 | 2424 | from an extension, change the getbundle2partsmapping dictionary directly."""
|
2425 | 2425 | |
|
2426 | 2426 | def dec(func): |
|
2427 | 2427 | assert stepname not in getbundle2partsmapping |
|
2428 | 2428 | getbundle2partsmapping[stepname] = func |
|
2429 | 2429 | if idx is None: |
|
2430 | 2430 | getbundle2partsorder.append(stepname) |
|
2431 | 2431 | else: |
|
2432 | 2432 | getbundle2partsorder.insert(idx, stepname) |
|
2433 | 2433 | return func |
|
2434 | 2434 | |
|
2435 | 2435 | return dec |
|
2436 | 2436 | |
|
2437 | 2437 | |
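# Editor's note: an illustrative sketch of registering a getbundle part
# generator with the decorator above. Passing ``idx`` inserts the step at
# that position in getbundle2partsorder instead of appending it; the step
# name and body are hypothetical.
#
#   @getbundle2partsgenerator(b'my-part', idx=0)
#   def _getbundlemypart(bundler, repo, source, **kwargs):
#       pass  # would add parts via bundler.newpart(...)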
|
2438 | 2438 | def bundle2requested(bundlecaps): |
|
2439 | 2439 | if bundlecaps is not None: |
|
2440 | 2440 | return any(cap.startswith(b'HG2') for cap in bundlecaps) |
|
2441 | 2441 | return False |
|
2442 | 2442 | |
|
2443 | 2443 | |
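# Editor's note: bundle2requested() simply probes the advertised bundle
# capabilities for an HG2x marker.
#
#   bundle2requested([b'HG20', b'bundle2=...'])  # -> True
#   bundle2requested([b'HG10UN'])                # -> False
#   bundle2requested(None)                       # -> False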
|
2444 | 2444 | def getbundlechunks( |
|
2445 | 2445 | repo, source, heads=None, common=None, bundlecaps=None, **kwargs |
|
2446 | 2446 | ): |
|
2447 | 2447 | """Return chunks constituting a bundle's raw data. |
|
2448 | 2448 | |
|
2449 | 2449 | Could be a bundle HG10 or a bundle HG20 depending on bundlecaps |
|
2450 | 2450 | passed. |
|
2451 | 2451 | |
|
2452 | 2452 | Returns a 2-tuple of a dict with metadata about the generated bundle |
|
2453 | 2453 | and an iterator over raw chunks (of varying sizes). |
|
2454 | 2454 | """ |
|
2455 | 2455 | kwargs = pycompat.byteskwargs(kwargs) |
|
2456 | 2456 | info = {} |
|
2457 | 2457 | usebundle2 = bundle2requested(bundlecaps) |
|
2458 | 2458 | # bundle10 case |
|
2459 | 2459 | if not usebundle2: |
|
2460 | 2460 | if bundlecaps and not kwargs.get(b'cg', True): |
|
2461 | 2461 | raise ValueError( |
|
2462 | 2462 | _(b'request for bundle10 must include changegroup') |
|
2463 | 2463 | ) |
|
2464 | 2464 | |
|
2465 | 2465 | if kwargs: |
|
2466 | 2466 | raise ValueError( |
|
2467 | 2467 | _(b'unsupported getbundle arguments: %s') |
|
2468 | 2468 | % b', '.join(sorted(kwargs.keys())) |
|
2469 | 2469 | ) |
|
2470 | 2470 | outgoing = _computeoutgoing(repo, heads, common) |
|
2471 | 2471 | info[b'bundleversion'] = 1 |
|
2472 | 2472 | return ( |
|
2473 | 2473 | info, |
|
2474 | 2474 | changegroup.makestream( |
|
2475 | 2475 | repo, outgoing, b'01', source, bundlecaps=bundlecaps |
|
2476 | 2476 | ), |
|
2477 | 2477 | ) |
|
2478 | 2478 | |
|
2479 | 2479 | # bundle20 case |
|
2480 | 2480 | info[b'bundleversion'] = 2 |
|
2481 | 2481 | b2caps = {} |
|
2482 | 2482 | for bcaps in bundlecaps: |
|
2483 | 2483 | if bcaps.startswith(b'bundle2='): |
|
2484 | 2484 | blob = urlreq.unquote(bcaps[len(b'bundle2=') :]) |
|
2485 | 2485 | b2caps.update(bundle2.decodecaps(blob)) |
|
2486 | 2486 | bundler = bundle2.bundle20(repo.ui, b2caps) |
|
2487 | 2487 | |
|
2488 | 2488 | kwargs[b'heads'] = heads |
|
2489 | 2489 | kwargs[b'common'] = common |
|
2490 | 2490 | |
|
2491 | 2491 | for name in getbundle2partsorder: |
|
2492 | 2492 | func = getbundle2partsmapping[name] |
|
2493 | 2493 | func( |
|
2494 | 2494 | bundler, |
|
2495 | 2495 | repo, |
|
2496 | 2496 | source, |
|
2497 | 2497 | bundlecaps=bundlecaps, |
|
2498 | 2498 | b2caps=b2caps, |
|
2499 | 2499 | **pycompat.strkwargs(kwargs) |
|
2500 | 2500 | ) |
|
2501 | 2501 | |
|
2502 | 2502 | info[b'prefercompressed'] = bundler.prefercompressed |
|
2503 | 2503 | |
|
2504 | 2504 | return info, bundler.getchunks() |
|
2505 | 2505 | |
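
A hedged sketch of a caller consuming the (info, chunks) pair; `repo` is assumed to be an open localrepo instance, and the source string and output path are invented:

    # Stream the raw bundle data produced by getbundlechunks to a file.
    info, chunks = getbundlechunks(repo, b'serve', bundlecaps={b'HG20'})
    assert info[b'bundleversion'] == 2
    with open('out.bundle', 'wb') as fh:
        for chunk in chunks:
            fh.write(chunk)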
|
2506 | 2506 | |
|
2507 | 2507 | @getbundle2partsgenerator(b'stream2') |
|
2508 | 2508 | def _getbundlestream2(bundler, repo, *args, **kwargs): |
|
2509 | 2509 | return bundle2.addpartbundlestream2(bundler, repo, **kwargs) |
|
2510 | 2510 | |
|
2511 | 2511 | |
|
2512 | 2512 | @getbundle2partsgenerator(b'changegroup') |
|
2513 | 2513 | def _getbundlechangegrouppart( |
|
2514 | 2514 | bundler, |
|
2515 | 2515 | repo, |
|
2516 | 2516 | source, |
|
2517 | 2517 | bundlecaps=None, |
|
2518 | 2518 | b2caps=None, |
|
2519 | 2519 | heads=None, |
|
2520 | 2520 | common=None, |
|
2521 | 2521 | **kwargs |
|
2522 | 2522 | ): |
|
2523 | 2523 | """add a changegroup part to the requested bundle""" |
|
2524 | 2524 | if not kwargs.get('cg', True) or not b2caps: |
|
2525 | 2525 | return |
|
2526 | 2526 | |
|
2527 | 2527 | version = b'01' |
|
2528 | 2528 | cgversions = b2caps.get(b'changegroup') |
|
2529 | 2529 | if cgversions: # 3.1 and 3.2 ship with an empty value |
|
2530 | 2530 | cgversions = [ |
|
2531 | 2531 | v |
|
2532 | 2532 | for v in cgversions |
|
2533 | 2533 | if v in changegroup.supportedoutgoingversions(repo) |
|
2534 | 2534 | ] |
|
2535 | 2535 | if not cgversions: |
|
2536 | 2536 | raise error.Abort(_(b'no common changegroup version')) |
|
2537 | 2537 | version = max(cgversions) |
|
2538 | 2538 | |
|
2539 | 2539 | outgoing = _computeoutgoing(repo, heads, common) |
|
2540 | 2540 | if not outgoing.missing: |
|
2541 | 2541 | return |
|
2542 | 2542 | |
|
2543 | 2543 | if kwargs.get('narrow', False): |
|
2544 | 2544 | include = sorted(filter(bool, kwargs.get('includepats', []))) |
|
2545 | 2545 | exclude = sorted(filter(bool, kwargs.get('excludepats', []))) |
|
2546 | 2546 | matcher = narrowspec.match(repo.root, include=include, exclude=exclude) |
|
2547 | 2547 | else: |
|
2548 | 2548 | matcher = None |
|
2549 | 2549 | |
|
2550 | 2550 | cgstream = changegroup.makestream( |
|
2551 | 2551 | repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher |
|
2552 | 2552 | ) |
|
2553 | 2553 | |
|
2554 | 2554 | part = bundler.newpart(b'changegroup', data=cgstream) |
|
2555 | 2555 | if cgversions: |
|
2556 | 2556 | part.addparam(b'version', version) |
|
2557 | 2557 | |
|
2558 | 2558 | part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False) |
|
2559 | 2559 | |
|
2560 | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: | |
|
2560 | if scmutil.istreemanifest(repo): | |
|
2561 | 2561 | part.addparam(b'treemanifest', b'1') |
|
2562 | 2562 | |
|
2563 | 2563 | if b'exp-sidedata-flag' in repo.requirements: |
|
2564 | 2564 | part.addparam(b'exp-sidedata', b'1') |
|
2565 | 2565 | |
|
2566 | 2566 | if ( |
|
2567 | 2567 | kwargs.get('narrow', False) |
|
2568 | 2568 | and kwargs.get('narrow_acl', False) |
|
2569 | 2569 | and (include or exclude) |
|
2570 | 2570 | ): |
|
2571 | 2571 | # this is mandatory because otherwise ACL clients won't work |
|
2572 | 2572 | narrowspecpart = bundler.newpart(b'Narrow:responsespec') |
|
2573 | 2573 | narrowspecpart.data = b'%s\0%s' % ( |
|
2574 | 2574 | b'\n'.join(include), |
|
2575 | 2575 | b'\n'.join(exclude), |
|
2576 | 2576 | ) |
|
2577 | 2577 | |
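
The responsespec payload built above is simple enough to show standalone: the include and exclude patterns are each newline-joined and separated by a NUL byte (patterns invented):

    include = [b'path:src', b'path:docs']
    exclude = [b'path:src/vendor']
    data = b'%s\0%s' % (b'\n'.join(include), b'\n'.join(exclude))
    assert data == b'path:src\npath:docs\x00path:src/vendor'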
|
2578 | 2578 | |
|
2579 | 2579 | @getbundle2partsgenerator(b'bookmarks') |
|
2580 | 2580 | def _getbundlebookmarkpart( |
|
2581 | 2581 | bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs |
|
2582 | 2582 | ): |
|
2583 | 2583 | """add a bookmark part to the requested bundle""" |
|
2584 | 2584 | if not kwargs.get('bookmarks', False): |
|
2585 | 2585 | return |
|
2586 | 2586 | if not b2caps or b'bookmarks' not in b2caps: |
|
2587 | 2587 | raise error.Abort(_(b'no common bookmarks exchange method')) |
|
2588 | 2588 | books = bookmod.listbinbookmarks(repo) |
|
2589 | 2589 | data = bookmod.binaryencode(books) |
|
2590 | 2590 | if data: |
|
2591 | 2591 | bundler.newpart(b'bookmarks', data=data) |
|
2592 | 2592 | |
|
2593 | 2593 | |
|
2594 | 2594 | @getbundle2partsgenerator(b'listkeys') |
|
2595 | 2595 | def _getbundlelistkeysparts( |
|
2596 | 2596 | bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs |
|
2597 | 2597 | ): |
|
2598 | 2598 | """add parts containing listkeys namespaces to the requested bundle""" |
|
2599 | 2599 | listkeys = kwargs.get('listkeys', ()) |
|
2600 | 2600 | for namespace in listkeys: |
|
2601 | 2601 | part = bundler.newpart(b'listkeys') |
|
2602 | 2602 | part.addparam(b'namespace', namespace) |
|
2603 | 2603 | keys = repo.listkeys(namespace).items() |
|
2604 | 2604 | part.data = pushkey.encodekeys(keys) |
|
2605 | 2605 | |
|
2606 | 2606 | |
|
2607 | 2607 | @getbundle2partsgenerator(b'obsmarkers') |
|
2608 | 2608 | def _getbundleobsmarkerpart( |
|
2609 | 2609 | bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs |
|
2610 | 2610 | ): |
|
2611 | 2611 | """add an obsolescence markers part to the requested bundle""" |
|
2612 | 2612 | if kwargs.get('obsmarkers', False): |
|
2613 | 2613 | if heads is None: |
|
2614 | 2614 | heads = repo.heads() |
|
2615 | 2615 | subset = [c.node() for c in repo.set(b'::%ln', heads)] |
|
2616 | 2616 | markers = repo.obsstore.relevantmarkers(subset) |
|
2617 | 2617 | markers = obsutil.sortedmarkers(markers) |
|
2618 | 2618 | bundle2.buildobsmarkerspart(bundler, markers) |
|
2619 | 2619 | |
|
2620 | 2620 | |
|
2621 | 2621 | @getbundle2partsgenerator(b'phases') |
|
2622 | 2622 | def _getbundlephasespart( |
|
2623 | 2623 | bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs |
|
2624 | 2624 | ): |
|
2625 | 2625 | """add phase heads part to the requested bundle""" |
|
2626 | 2626 | if kwargs.get('phases', False): |
|
2627 | 2627 | if not b2caps or b'heads' not in b2caps.get(b'phases'): |
|
2628 | 2628 | raise error.Abort(_(b'no common phases exchange method')) |
|
2629 | 2629 | if heads is None: |
|
2630 | 2630 | heads = repo.heads() |
|
2631 | 2631 | |
|
2632 | 2632 | headsbyphase = collections.defaultdict(set) |
|
2633 | 2633 | if repo.publishing(): |
|
2634 | 2634 | headsbyphase[phases.public] = heads |
|
2635 | 2635 | else: |
|
2636 | 2636 | # find the appropriate heads to move |
|
2637 | 2637 | |
|
2638 | 2638 | phase = repo._phasecache.phase |
|
2639 | 2639 | node = repo.changelog.node |
|
2640 | 2640 | rev = repo.changelog.rev |
|
2641 | 2641 | for h in heads: |
|
2642 | 2642 | headsbyphase[phase(repo, rev(h))].add(h) |
|
2643 | 2643 | seenphases = list(headsbyphase.keys()) |
|
2644 | 2644 | |
|
2645 | 2645 | # We do not handle anything but the public and draft phases for now
|
2646 | 2646 | if seenphases: |
|
2647 | 2647 | assert max(seenphases) <= phases.draft |
|
2648 | 2648 | |
|
2649 | 2649 | # if client is pulling non-public changesets, we need to find |
|
2650 | 2650 | # intermediate public heads. |
|
2651 | 2651 | draftheads = headsbyphase.get(phases.draft, set()) |
|
2652 | 2652 | if draftheads: |
|
2653 | 2653 | publicheads = headsbyphase.get(phases.public, set()) |
|
2654 | 2654 | |
|
2655 | 2655 | revset = b'heads(only(%ln, %ln) and public())' |
|
2656 | 2656 | extraheads = repo.revs(revset, draftheads, publicheads) |
|
2657 | 2657 | for r in extraheads: |
|
2658 | 2658 | headsbyphase[phases.public].add(node(r)) |
|
2659 | 2659 | |
|
2660 | 2660 | # transform data in a format used by the encoding function |
|
2661 | 2661 | phasemapping = { |
|
2662 | 2662 | phase: sorted(headsbyphase[phase]) for phase in phases.allphases |
|
2663 | 2663 | } |
|
2664 | 2664 | |
|
2665 | 2665 | # generate the actual part |
|
2666 | 2666 | phasedata = phases.binaryencode(phasemapping) |
|
2667 | 2667 | bundler.newpart(b'phase-heads', data=phasedata) |
|
2668 | 2668 | |
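
A worked illustration of the intermediate-public-heads revset on an invented graph:

    # Hypothetical history (revs 0-3 public, rev 4 draft):
    #
    #   0 -- 1 -- 2 -- 4   <- draft head being pulled
    #         \
    #          3            <- public head
    #
    # only(4, 3) = {2, 4}; heads(only(4, 3) and public()) = {2}, so the
    # otherwise unlisted public changeset 2 is added to
    # headsbyphase[phases.public].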
|
2669 | 2669 | |
|
2670 | 2670 | @getbundle2partsgenerator(b'hgtagsfnodes') |
|
2671 | 2671 | def _getbundletagsfnodes( |
|
2672 | 2672 | bundler, |
|
2673 | 2673 | repo, |
|
2674 | 2674 | source, |
|
2675 | 2675 | bundlecaps=None, |
|
2676 | 2676 | b2caps=None, |
|
2677 | 2677 | heads=None, |
|
2678 | 2678 | common=None, |
|
2679 | 2679 | **kwargs |
|
2680 | 2680 | ): |
|
2681 | 2681 | """Transfer the .hgtags filenodes mapping. |
|
2682 | 2682 | |
|
2683 | 2683 | Only values for heads in this bundle will be transferred. |
|
2684 | 2684 | |
|
2685 | 2685 | The part data consists of pairs of 20 byte changeset node and .hgtags |
|
2686 | 2686 | filenodes raw values. |
|
2687 | 2687 | """ |
|
2688 | 2688 | # Don't send unless: |
|
2689 | 2689 | # - changesets are being exchanged,
|
2690 | 2690 | # - the client supports it. |
|
2691 | 2691 | if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps): |
|
2692 | 2692 | return |
|
2693 | 2693 | |
|
2694 | 2694 | outgoing = _computeoutgoing(repo, heads, common) |
|
2695 | 2695 | bundle2.addparttagsfnodescache(repo, bundler, outgoing) |
|
2696 | 2696 | |
|
2697 | 2697 | |
|
2698 | 2698 | @getbundle2partsgenerator(b'cache:rev-branch-cache') |
|
2699 | 2699 | def _getbundlerevbranchcache( |
|
2700 | 2700 | bundler, |
|
2701 | 2701 | repo, |
|
2702 | 2702 | source, |
|
2703 | 2703 | bundlecaps=None, |
|
2704 | 2704 | b2caps=None, |
|
2705 | 2705 | heads=None, |
|
2706 | 2706 | common=None, |
|
2707 | 2707 | **kwargs |
|
2708 | 2708 | ): |
|
2709 | 2709 | """Transfer the rev-branch-cache mapping |
|
2710 | 2710 | |
|
2711 | 2711 | The payload is a series of data related to each branch |
|
2712 | 2712 | |
|
2713 | 2713 | 1) branch name length |
|
2714 | 2714 | 2) number of open heads |
|
2715 | 2715 | 3) number of closed heads |
|
2716 | 2716 | 4) open heads nodes |
|
2717 | 2717 | 5) closed heads nodes |
|
2718 | 2718 | """ |
|
2719 | 2719 | # Don't send unless: |
|
2720 | 2720 | # - changesets are being exchanged,

2721 | 2721 | # - the client supports it,
|
2722 | 2722 | # - narrow bundle isn't in play (not currently compatible). |
|
2723 | 2723 | if ( |
|
2724 | 2724 | not kwargs.get('cg', True) |
|
2725 | 2725 | or not b2caps |
|
2726 | 2726 | or b'rev-branch-cache' not in b2caps |
|
2727 | 2727 | or kwargs.get('narrow', False) |
|
2728 | 2728 | or repo.ui.has_section(_NARROWACL_SECTION) |
|
2729 | 2729 | ): |
|
2730 | 2730 | return |
|
2731 | 2731 | |
|
2732 | 2732 | outgoing = _computeoutgoing(repo, heads, common) |
|
2733 | 2733 | bundle2.addpartrevbranchcache(repo, bundler, outgoing) |
|
2734 | 2734 | |
|
2735 | 2735 | |
|
2736 | 2736 | def check_heads(repo, their_heads, context): |
|
2737 | 2737 | """check if the heads of a repo have been modified |
|
2738 | 2738 | |
|
2739 | 2739 | Used by peer for unbundling. |
|
2740 | 2740 | """ |
|
2741 | 2741 | heads = repo.heads() |
|
2742 | 2742 | heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest() |
|
2743 | 2743 | if not ( |
|
2744 | 2744 | their_heads == [b'force'] |
|
2745 | 2745 | or their_heads == heads |
|
2746 | 2746 | or their_heads == [b'hashed', heads_hash] |
|
2747 | 2747 | ): |
|
2748 | 2748 | # someone else committed/pushed/unbundled while we |
|
2749 | 2749 | # were transferring data |
|
2750 | 2750 | raise error.PushRaced( |
|
2751 | 2751 | b'repository changed while %s - please try again' % context |
|
2752 | 2752 | ) |
|
2753 | 2753 | |
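
The b'hashed' form lets a client pin the remote heads it saw without echoing them all back. A standalone illustration of the same check using plain hashlib (head values invented):

    import hashlib

    def unchanged(current_heads, their_heads):
        heads_hash = hashlib.sha1(b''.join(sorted(current_heads))).digest()
        return (their_heads == [b'force']
                or their_heads == current_heads
                or their_heads == [b'hashed', heads_hash])

    seen = [b'\x11' * 20, b'\x22' * 20]
    claim = [b'hashed', hashlib.sha1(b''.join(sorted(seen))).digest()]
    assert unchanged(seen, claim)                  # heads unchanged
    assert not unchanged([b'\x33' * 20], claim)    # heads moved: push raced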
|
2754 | 2754 | |
|
2755 | 2755 | def unbundle(repo, cg, heads, source, url): |
|
2756 | 2756 | """Apply a bundle to a repo. |
|
2757 | 2757 | |
|
2758 | 2758 | This function makes sure the repo is locked during the application and has

2759 | 2759 | a mechanism to check that no push race occurred between the creation of the

2760 | 2760 | bundle and its application.

2761 | 2761 |

2762 | 2762 | If the push was raced, a PushRaced exception is raised.
|
2763 | 2763 | r = 0 |
|
2764 | 2764 | # need a transaction when processing a bundle2 stream |
|
2765 | 2765 | # [wlock, lock, tr] - needs to be an array so nested functions can modify it |
|
2766 | 2766 | lockandtr = [None, None, None] |
|
2767 | 2767 | recordout = None |
|
2768 | 2768 | # quick fix for output mismatch with bundle2 in 3.4 |
|
2769 | 2769 | captureoutput = repo.ui.configbool( |
|
2770 | 2770 | b'experimental', b'bundle2-output-capture' |
|
2771 | 2771 | ) |
|
2772 | 2772 | if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'): |
|
2773 | 2773 | captureoutput = True |
|
2774 | 2774 | try: |
|
2775 | 2775 | # note: outside bundle1, 'heads' is expected to be empty and this |
|
2776 | 2776 | # 'check_heads' call will be a no-op
|
2777 | 2777 | check_heads(repo, heads, b'uploading changes') |
|
2778 | 2778 | # push can proceed |
|
2779 | 2779 | if not isinstance(cg, bundle2.unbundle20): |
|
2780 | 2780 | # legacy case: bundle1 (changegroup 01) |
|
2781 | 2781 | txnname = b"\n".join([source, util.hidepassword(url)]) |
|
2782 | 2782 | with repo.lock(), repo.transaction(txnname) as tr: |
|
2783 | 2783 | op = bundle2.applybundle(repo, cg, tr, source, url) |
|
2784 | 2784 | r = bundle2.combinechangegroupresults(op) |
|
2785 | 2785 | else: |
|
2786 | 2786 | r = None |
|
2787 | 2787 | try: |
|
2788 | 2788 | |
|
2789 | 2789 | def gettransaction(): |
|
2790 | 2790 | if not lockandtr[2]: |
|
2791 | 2791 | if not bookmod.bookmarksinstore(repo): |
|
2792 | 2792 | lockandtr[0] = repo.wlock() |
|
2793 | 2793 | lockandtr[1] = repo.lock() |
|
2794 | 2794 | lockandtr[2] = repo.transaction(source) |
|
2795 | 2795 | lockandtr[2].hookargs[b'source'] = source |
|
2796 | 2796 | lockandtr[2].hookargs[b'url'] = url |
|
2797 | 2797 | lockandtr[2].hookargs[b'bundle2'] = b'1' |
|
2798 | 2798 | return lockandtr[2] |
|
2799 | 2799 | |
|
2800 | 2800 | # Do greedy locking by default until we're satisfied with lazy |
|
2801 | 2801 | # locking. |
|
2802 | 2802 | if not repo.ui.configbool( |
|
2803 | 2803 | b'experimental', b'bundle2lazylocking' |
|
2804 | 2804 | ): |
|
2805 | 2805 | gettransaction() |
|
2806 | 2806 | |
|
2807 | 2807 | op = bundle2.bundleoperation( |
|
2808 | 2808 | repo, |
|
2809 | 2809 | gettransaction, |
|
2810 | 2810 | captureoutput=captureoutput, |
|
2811 | 2811 | source=b'push', |
|
2812 | 2812 | ) |
|
2813 | 2813 | try: |
|
2814 | 2814 | op = bundle2.processbundle(repo, cg, op=op) |
|
2815 | 2815 | finally: |
|
2816 | 2816 | r = op.reply |
|
2817 | 2817 | if captureoutput and r is not None: |
|
2818 | 2818 | repo.ui.pushbuffer(error=True, subproc=True) |
|
2819 | 2819 | |
|
2820 | 2820 | def recordout(output): |
|
2821 | 2821 | r.newpart(b'output', data=output, mandatory=False) |
|
2822 | 2822 | |
|
2823 | 2823 | if lockandtr[2] is not None: |
|
2824 | 2824 | lockandtr[2].close() |
|
2825 | 2825 | except BaseException as exc: |
|
2826 | 2826 | exc.duringunbundle2 = True |
|
2827 | 2827 | if captureoutput and r is not None: |
|
2828 | 2828 | parts = exc._bundle2salvagedoutput = r.salvageoutput() |
|
2829 | 2829 | |
|
2830 | 2830 | def recordout(output): |
|
2831 | 2831 | part = bundle2.bundlepart( |
|
2832 | 2832 | b'output', data=output, mandatory=False |
|
2833 | 2833 | ) |
|
2834 | 2834 | parts.append(part) |
|
2835 | 2835 | |
|
2836 | 2836 | raise |
|
2837 | 2837 | finally: |
|
2838 | 2838 | lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0]) |
|
2839 | 2839 | if recordout is not None: |
|
2840 | 2840 | recordout(repo.ui.popbuffer()) |
|
2841 | 2841 | return r |
|
2842 | 2842 | |
|
2843 | 2843 | |
|
2844 | 2844 | def _maybeapplyclonebundle(pullop): |
|
2845 | 2845 | """Apply a clone bundle from a remote, if possible.""" |
|
2846 | 2846 | |
|
2847 | 2847 | repo = pullop.repo |
|
2848 | 2848 | remote = pullop.remote |
|
2849 | 2849 | |
|
2850 | 2850 | if not repo.ui.configbool(b'ui', b'clonebundles'): |
|
2851 | 2851 | return |
|
2852 | 2852 | |
|
2853 | 2853 | # Only run if local repo is empty. |
|
2854 | 2854 | if len(repo): |
|
2855 | 2855 | return |
|
2856 | 2856 | |
|
2857 | 2857 | if pullop.heads: |
|
2858 | 2858 | return |
|
2859 | 2859 | |
|
2860 | 2860 | if not remote.capable(b'clonebundles'): |
|
2861 | 2861 | return |
|
2862 | 2862 | |
|
2863 | 2863 | with remote.commandexecutor() as e: |
|
2864 | 2864 | res = e.callcommand(b'clonebundles', {}).result() |
|
2865 | 2865 | |
|
2866 | 2866 | # If we call the wire protocol command, that's good enough to record the |
|
2867 | 2867 | # attempt. |
|
2868 | 2868 | pullop.clonebundleattempted = True |
|
2869 | 2869 | |
|
2870 | 2870 | entries = parseclonebundlesmanifest(repo, res) |
|
2871 | 2871 | if not entries: |
|
2872 | 2872 | repo.ui.note( |
|
2873 | 2873 | _( |
|
2874 | 2874 | b'no clone bundles available on remote; ' |
|
2875 | 2875 | b'falling back to regular clone\n' |
|
2876 | 2876 | ) |
|
2877 | 2877 | ) |
|
2878 | 2878 | return |
|
2879 | 2879 | |
|
2880 | 2880 | entries = filterclonebundleentries( |
|
2881 | 2881 | repo, entries, streamclonerequested=pullop.streamclonerequested |
|
2882 | 2882 | ) |
|
2883 | 2883 | |
|
2884 | 2884 | if not entries: |
|
2885 | 2885 | # There is a thundering herd concern here. However, if a server |
|
2886 | 2886 | # operator doesn't advertise bundles appropriate for its clients, |
|
2887 | 2887 | # they deserve what's coming. Furthermore, from a client's |
|
2888 | 2888 | # perspective, no automatic fallback would mean not being able to |
|
2889 | 2889 | # clone! |
|
2890 | 2890 | repo.ui.warn( |
|
2891 | 2891 | _( |
|
2892 | 2892 | b'no compatible clone bundles available on server; ' |
|
2893 | 2893 | b'falling back to regular clone\n' |
|
2894 | 2894 | ) |
|
2895 | 2895 | ) |
|
2896 | 2896 | repo.ui.warn( |
|
2897 | 2897 | _(b'(you may want to report this to the server operator)\n') |
|
2898 | 2898 | ) |
|
2899 | 2899 | return |
|
2900 | 2900 | |
|
2901 | 2901 | entries = sortclonebundleentries(repo.ui, entries) |
|
2902 | 2902 | |
|
2903 | 2903 | url = entries[0][b'URL'] |
|
2904 | 2904 | repo.ui.status(_(b'applying clone bundle from %s\n') % url) |
|
2905 | 2905 | if trypullbundlefromurl(repo.ui, repo, url): |
|
2906 | 2906 | repo.ui.status(_(b'finished applying clone bundle\n')) |
|
2907 | 2907 | # Bundle failed. |
|
2908 | 2908 | # |
|
2909 | 2909 | # We abort by default to avoid the thundering herd of |
|
2910 | 2910 | # clients flooding a server that was expecting expensive |
|
2911 | 2911 | # clone load to be offloaded. |
|
2912 | 2912 | elif repo.ui.configbool(b'ui', b'clonebundlefallback'): |
|
2913 | 2913 | repo.ui.warn(_(b'falling back to normal clone\n')) |
|
2914 | 2914 | else: |
|
2915 | 2915 | raise error.Abort( |
|
2916 | 2916 | _(b'error applying bundle'), |
|
2917 | 2917 | hint=_( |
|
2918 | 2918 | b'if this error persists, consider contacting ' |
|
2919 | 2919 | b'the server operator or disable clone ' |
|
2920 | 2920 | b'bundles via ' |
|
2921 | 2921 | b'"--config ui.clonebundles=false"' |
|
2922 | 2922 | ), |
|
2923 | 2923 | ) |
|
2924 | 2924 | |
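
For reference, the client-side knobs consulted above live in hgrc; the values shown are illustrative (b'true' for ui.clonebundles mirrors the behavior the code checks, and ui.clonebundlefallback defaults to aborting, per the branch above):

    [ui]
    # allow applying advertised clone bundles
    clonebundles = true
    # abort instead of falling back to a regular clone on failure
    clonebundlefallback = false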
|
2925 | 2925 | |
|
2926 | 2926 | def parseclonebundlesmanifest(repo, s): |
|
2927 | 2927 | """Parses the raw text of a clone bundles manifest. |
|
2928 | 2928 | |
|
2929 | 2929 | Returns a list of dicts. The dicts have a ``URL`` key corresponding |
|
2930 | 2930 | to the URL and other keys are the attributes for the entry. |
|
2931 | 2931 | """ |
|
2932 | 2932 | m = [] |
|
2933 | 2933 | for line in s.splitlines(): |
|
2934 | 2934 | fields = line.split() |
|
2935 | 2935 | if not fields: |
|
2936 | 2936 | continue |
|
2937 | 2937 | attrs = {b'URL': fields[0]} |
|
2938 | 2938 | for rawattr in fields[1:]: |
|
2939 | 2939 | key, value = rawattr.split(b'=', 1) |
|
2940 | 2940 | key = urlreq.unquote(key) |
|
2941 | 2941 | value = urlreq.unquote(value) |
|
2942 | 2942 | attrs[key] = value |
|
2943 | 2943 | |
|
2944 | 2944 | # Parse BUNDLESPEC into components. This makes client-side |
|
2945 | 2945 | # preferences easier to specify since you can prefer a single |
|
2946 | 2946 | # component of the BUNDLESPEC. |
|
2947 | 2947 | if key == b'BUNDLESPEC': |
|
2948 | 2948 | try: |
|
2949 | 2949 | bundlespec = parsebundlespec(repo, value) |
|
2950 | 2950 | attrs[b'COMPRESSION'] = bundlespec.compression |
|
2951 | 2951 | attrs[b'VERSION'] = bundlespec.version |
|
2952 | 2952 | except error.InvalidBundleSpecification: |
|
2953 | 2953 | pass |
|
2954 | 2954 | except error.UnsupportedBundleSpecification: |
|
2955 | 2955 | pass |
|
2956 | 2956 | |
|
2957 | 2957 | m.append(attrs) |
|
2958 | 2958 | |
|
2959 | 2959 | return m |
|
2960 | 2960 | |
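
A hedged example of the input format and the approximate parsed result; the URL and attribute values are invented, and the derived COMPRESSION/VERSION keys assume parsebundlespec accepts the spec:

    manifest = b'https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=2GB\n'
    # parseclonebundlesmanifest(repo, manifest) yields, roughly:
    # [{b'URL': b'https://example.com/full.hg',
    #   b'BUNDLESPEC': b'gzip-v2',
    #   b'COMPRESSION': b'gzip', b'VERSION': b'v2',
    #   b'REQUIREDRAM': b'2GB'}]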
|
2961 | 2961 | |
|
2962 | 2962 | def isstreamclonespec(bundlespec): |
|
2963 | 2963 | # Stream clone v1 |
|
2964 | 2964 | if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1': |
|
2965 | 2965 | return True |
|
2966 | 2966 | |
|
2967 | 2967 | # Stream clone v2 |
|
2968 | 2968 | if ( |
|
2969 | 2969 | bundlespec.wirecompression == b'UN' |
|
2970 | 2970 | and bundlespec.wireversion == b'02' |
|
2971 | 2971 | and bundlespec.contentopts.get(b'streamv2') |
|
2972 | 2972 | ): |
|
2973 | 2973 | return True |
|
2974 | 2974 | |
|
2975 | 2975 | return False |
|
2976 | 2976 | |
|
2977 | 2977 | |
|
2978 | 2978 | def filterclonebundleentries(repo, entries, streamclonerequested=False): |
|
2979 | 2979 | """Remove incompatible clone bundle manifest entries. |
|
2980 | 2980 | |
|
2981 | 2981 | Accepts a list of entries parsed with ``parseclonebundlesmanifest`` |
|
2982 | 2982 | and returns a new list consisting of only the entries that this client |
|
2983 | 2983 | should be able to apply. |
|
2984 | 2984 | |
|
2985 | 2985 | There is no guarantee we'll be able to apply all returned entries because |
|
2986 | 2986 | the metadata we use to filter on may be missing or wrong. |
|
2987 | 2987 | """ |
|
2988 | 2988 | newentries = [] |
|
2989 | 2989 | for entry in entries: |
|
2990 | 2990 | spec = entry.get(b'BUNDLESPEC') |
|
2991 | 2991 | if spec: |
|
2992 | 2992 | try: |
|
2993 | 2993 | bundlespec = parsebundlespec(repo, spec, strict=True) |
|
2994 | 2994 | |
|
2995 | 2995 | # If a stream clone was requested, filter out non-streamclone |
|
2996 | 2996 | # entries. |
|
2997 | 2997 | if streamclonerequested and not isstreamclonespec(bundlespec): |
|
2998 | 2998 | repo.ui.debug( |
|
2999 | 2999 | b'filtering %s because not a stream clone\n' |
|
3000 | 3000 | % entry[b'URL'] |
|
3001 | 3001 | ) |
|
3002 | 3002 | continue |
|
3003 | 3003 | |
|
3004 | 3004 | except error.InvalidBundleSpecification as e: |
|
3005 | 3005 | repo.ui.debug(stringutil.forcebytestr(e) + b'\n') |
|
3006 | 3006 | continue |
|
3007 | 3007 | except error.UnsupportedBundleSpecification as e: |
|
3008 | 3008 | repo.ui.debug( |
|
3009 | 3009 | b'filtering %s because unsupported bundle ' |
|
3010 | 3010 | b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e)) |
|
3011 | 3011 | ) |
|
3012 | 3012 | continue |
|
3013 | 3013 | # If we don't have a spec and requested a stream clone, we don't know |
|
3014 | 3014 | # what the entry is so don't attempt to apply it. |
|
3015 | 3015 | elif streamclonerequested: |
|
3016 | 3016 | repo.ui.debug( |
|
3017 | 3017 | b'filtering %s because cannot determine if a stream ' |
|
3018 | 3018 | b'clone bundle\n' % entry[b'URL'] |
|
3019 | 3019 | ) |
|
3020 | 3020 | continue |
|
3021 | 3021 | |
|
3022 | 3022 | if b'REQUIRESNI' in entry and not sslutil.hassni: |
|
3023 | 3023 | repo.ui.debug( |
|
3024 | 3024 | b'filtering %s because SNI not supported\n' % entry[b'URL'] |
|
3025 | 3025 | ) |
|
3026 | 3026 | continue |
|
3027 | 3027 | |
|
3028 | 3028 | if b'REQUIREDRAM' in entry: |
|
3029 | 3029 | try: |
|
3030 | 3030 | requiredram = util.sizetoint(entry[b'REQUIREDRAM']) |
|
3031 | 3031 | except error.ParseError: |
|
3032 | 3032 | repo.ui.debug( |
|
3033 | 3033 | b'filtering %s due to a bad REQUIREDRAM attribute\n' |
|
3034 | 3034 | % entry[b'URL'] |
|
3035 | 3035 | ) |
|
3036 | 3036 | continue |
|
3037 | 3037 | actualram = repo.ui.estimatememory() |
|
3038 | 3038 | if actualram is not None and actualram * 0.66 < requiredram: |
|
3039 | 3039 | repo.ui.debug( |
|
3040 | 3040 | b'filtering %s as it needs more than 2/3 of system memory\n' |
|
3041 | 3041 | % entry[b'URL'] |
|
3042 | 3042 | ) |
|
3043 | 3043 | continue |
|
3044 | 3044 | |
|
3045 | 3045 | newentries.append(entry) |
|
3046 | 3046 | |
|
3047 | 3047 | return newentries |
|
3048 | 3048 | |
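
The REQUIREDRAM rule keeps an entry only when the declared requirement fits within roughly two thirds of estimated system memory. A standalone restatement of the arithmetic (function name invented):

    def passes_ram_check(requiredram, actualram):
        # mirrors the filter above: an entry is dropped when
        # actualram * 0.66 < requiredram
        return not (actualram is not None and actualram * 0.66 < requiredram)

    GiB = 1 << 30
    assert passes_ram_check(4 * GiB, 8 * GiB)      # 4 GiB needed, 8 GiB present
    assert not passes_ram_check(6 * GiB, 8 * GiB)  # 6 GiB > 0.66 * 8 GiB = 5.28 GiB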
|
3049 | 3049 | |
|
3050 | 3050 | class clonebundleentry(object): |
|
3051 | 3051 | """Represents an item in a clone bundles manifest. |
|
3052 | 3052 | |
|
3053 | 3053 | This rich class is needed to support sorting since sorted() in Python 3 |
|
3054 | 3054 | doesn't support ``cmp`` and our comparison is complex enough that ``key=`` |
|
3055 | 3055 | won't work. |
|
3056 | 3056 | """ |
|
3057 | 3057 | |
|
3058 | 3058 | def __init__(self, value, prefers): |
|
3059 | 3059 | self.value = value |
|
3060 | 3060 | self.prefers = prefers |
|
3061 | 3061 | |
|
3062 | 3062 | def _cmp(self, other): |
|
3063 | 3063 | for prefkey, prefvalue in self.prefers: |
|
3064 | 3064 | avalue = self.value.get(prefkey) |
|
3065 | 3065 | bvalue = other.value.get(prefkey) |
|
3066 | 3066 | |
|
3067 | 3067 | # Special case: b is missing the attribute and a matches exactly.
|
3068 | 3068 | if avalue is not None and bvalue is None and avalue == prefvalue: |
|
3069 | 3069 | return -1 |
|
3070 | 3070 | |
|
3071 | 3071 | # Special case: a is missing the attribute and b matches exactly.
|
3072 | 3072 | if bvalue is not None and avalue is None and bvalue == prefvalue: |
|
3073 | 3073 | return 1 |
|
3074 | 3074 | |
|
3075 | 3075 | # We can't compare unless attribute present on both. |
|
3076 | 3076 | if avalue is None or bvalue is None: |
|
3077 | 3077 | continue |
|
3078 | 3078 | |
|
3079 | 3079 | # Same values should fall back to next attribute. |
|
3080 | 3080 | if avalue == bvalue: |
|
3081 | 3081 | continue |
|
3082 | 3082 | |
|
3083 | 3083 | # Exact matches come first. |
|
3084 | 3084 | if avalue == prefvalue: |
|
3085 | 3085 | return -1 |
|
3086 | 3086 | if bvalue == prefvalue: |
|
3087 | 3087 | return 1 |
|
3088 | 3088 | |
|
3089 | 3089 | # Fall back to next attribute. |
|
3090 | 3090 | continue |
|
3091 | 3091 | |
|
3092 | 3092 | # If we got here we couldn't sort by attributes and prefers. Fall |
|
3093 | 3093 | # back to index order. |
|
3094 | 3094 | return 0 |
|
3095 | 3095 | |
|
3096 | 3096 | def __lt__(self, other): |
|
3097 | 3097 | return self._cmp(other) < 0 |
|
3098 | 3098 | |
|
3099 | 3099 | def __gt__(self, other): |
|
3100 | 3100 | return self._cmp(other) > 0 |
|
3101 | 3101 | |
|
3102 | 3102 | def __eq__(self, other): |
|
3103 | 3103 | return self._cmp(other) == 0 |
|
3104 | 3104 | |
|
3105 | 3105 | def __le__(self, other): |
|
3106 | 3106 | return self._cmp(other) <= 0 |
|
3107 | 3107 | |
|
3108 | 3108 | def __ge__(self, other): |
|
3109 | 3109 | return self._cmp(other) >= 0 |
|
3110 | 3110 | |
|
3111 | 3111 | def __ne__(self, other): |
|
3112 | 3112 | return self._cmp(other) != 0 |
|
3113 | 3113 | |
|
3114 | 3114 | |
|
3115 | 3115 | def sortclonebundleentries(ui, entries): |
|
3116 | 3116 | prefers = ui.configlist(b'ui', b'clonebundleprefers') |
|
3117 | 3117 | if not prefers: |
|
3118 | 3118 | return list(entries) |
|
3119 | 3119 | |
|
3120 | 3120 | def _split(p): |
|
3121 | 3121 | if b'=' not in p: |
|
3122 | 3122 | hint = _(b"each comma separated item should be key=value pairs") |
|
3123 | 3123 | raise error.Abort( |
|
3124 | 3124 | _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint |
|
3125 | 3125 | ) |
|
3126 | 3126 | return p.split(b'=', 1) |
|
3127 | 3127 | |
|
3128 | 3128 | prefers = [_split(p) for p in prefers] |
|
3129 | 3129 | |
|
3130 | 3130 | items = sorted(clonebundleentry(v, prefers) for v in entries) |
|
3131 | 3131 | return [i.value for i in items] |
|
3132 | 3132 | |
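
A hedged illustration of how the preference list drives the ordering, reusing the clonebundleentry class above; the entries are invented, and the config equivalent would be ui.clonebundleprefers = COMPRESSION=zstd, VERSION=v2:

    prefers = [(b'COMPRESSION', b'zstd'), (b'VERSION', b'v2')]
    entries = [
        {b'URL': b'a', b'COMPRESSION': b'gzip', b'VERSION': b'v2'},
        {b'URL': b'b', b'COMPRESSION': b'zstd', b'VERSION': b'v2'},
    ]
    items = sorted(clonebundleentry(v, prefers) for v in entries)
    # entry 'b' wins: its COMPRESSION matches the first preference exactly
    assert [i.value[b'URL'] for i in items] == [b'b', b'a']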
|
3133 | 3133 | |
|
3134 | 3134 | def trypullbundlefromurl(ui, repo, url): |
|
3135 | 3135 | """Attempt to apply a bundle from a URL.""" |
|
3136 | 3136 | with repo.lock(), repo.transaction(b'bundleurl') as tr: |
|
3137 | 3137 | try: |
|
3138 | 3138 | fh = urlmod.open(ui, url) |
|
3139 | 3139 | cg = readbundle(ui, fh, b'stream') |
|
3140 | 3140 | |
|
3141 | 3141 | if isinstance(cg, streamclone.streamcloneapplier): |
|
3142 | 3142 | cg.apply(repo) |
|
3143 | 3143 | else: |
|
3144 | 3144 | bundle2.applybundle(repo, cg, tr, b'clonebundles', url) |
|
3145 | 3145 | return True |
|
3146 | 3146 | except urlerr.httperror as e: |
|
3147 | 3147 | ui.warn( |
|
3148 | 3148 | _(b'HTTP error fetching bundle: %s\n') |
|
3149 | 3149 | % stringutil.forcebytestr(e) |
|
3150 | 3150 | ) |
|
3151 | 3151 | except urlerr.urlerror as e: |
|
3152 | 3152 | ui.warn( |
|
3153 | 3153 | _(b'error fetching bundle: %s\n') |
|
3154 | 3154 | % stringutil.forcebytestr(e.reason) |
|
3155 | 3155 | ) |
|
3156 | 3156 | |
|
3157 | 3157 | return False |
@@ -1,540 +1,541 | |||
|
1 | 1 | # repair.py - functions for repository repair for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
4 | 4 | # Copyright 2007 Matt Mackall |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import errno |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from .node import ( |
|
15 | 15 | hex, |
|
16 | 16 | short, |
|
17 | 17 | ) |
|
18 | 18 | from . import ( |
|
19 | 19 | bundle2, |
|
20 | 20 | changegroup, |
|
21 | 21 | discovery, |
|
22 | 22 | error, |
|
23 | 23 | exchange, |
|
24 | 24 | obsolete, |
|
25 | 25 | obsutil, |
|
26 | 26 | pathutil, |
|
27 | 27 | phases, |
|
28 | 28 | pycompat, |
|
29 | 29 | requirements, |
|
30 | scmutil, | |
|
30 | 31 | util, |
|
31 | 32 | ) |
|
32 | 33 | from .utils import ( |
|
33 | 34 | hashutil, |
|
34 | 35 | stringutil, |
|
35 | 36 | ) |
|
36 | 37 | |
|
37 | 38 | |
|
38 | 39 | def backupbundle( |
|
39 | 40 | repo, bases, heads, node, suffix, compress=True, obsolescence=True |
|
40 | 41 | ): |
|
41 | 42 | """create a bundle with the specified revisions as a backup""" |
|
42 | 43 | |
|
43 | 44 | backupdir = b"strip-backup" |
|
44 | 45 | vfs = repo.vfs |
|
45 | 46 | if not vfs.isdir(backupdir): |
|
46 | 47 | vfs.mkdir(backupdir) |
|
47 | 48 | |
|
48 | 49 | # Include a hash of all the nodes in the filename for uniqueness |
|
49 | 50 | allcommits = repo.set(b'%ln::%ln', bases, heads) |
|
50 | 51 | allhashes = sorted(c.hex() for c in allcommits) |
|
51 | 52 | totalhash = hashutil.sha1(b''.join(allhashes)).digest() |
|
52 | 53 | name = b"%s/%s-%s-%s.hg" % ( |
|
53 | 54 | backupdir, |
|
54 | 55 | short(node), |
|
55 | 56 | hex(totalhash[:4]), |
|
56 | 57 | suffix, |
|
57 | 58 | ) |
|
58 | 59 | |
|
59 | 60 | cgversion = changegroup.localversion(repo) |
|
60 | 61 | comp = None |
|
61 | 62 | if cgversion != b'01': |
|
62 | 63 | bundletype = b"HG20" |
|
63 | 64 | if compress: |
|
64 | 65 | comp = b'BZ' |
|
65 | 66 | elif compress: |
|
66 | 67 | bundletype = b"HG10BZ" |
|
67 | 68 | else: |
|
68 | 69 | bundletype = b"HG10UN" |
|
69 | 70 | |
|
70 | 71 | outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads) |
|
71 | 72 | contentopts = { |
|
72 | 73 | b'cg.version': cgversion, |
|
73 | 74 | b'obsolescence': obsolescence, |
|
74 | 75 | b'phases': True, |
|
75 | 76 | } |
|
76 | 77 | return bundle2.writenewbundle( |
|
77 | 78 | repo.ui, |
|
78 | 79 | repo, |
|
79 | 80 | b'strip', |
|
80 | 81 | name, |
|
81 | 82 | bundletype, |
|
82 | 83 | outgoing, |
|
83 | 84 | contentopts, |
|
84 | 85 | vfs, |
|
85 | 86 | compression=comp, |
|
86 | 87 | ) |
|
87 | 88 | |
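
A standalone sketch of the backup naming scheme, using plain hashlib in place of Mercurial's helpers; the node values are invented:

    import hashlib

    allhashes = sorted([b'aa' * 20, b'bb' * 20])   # full hex changeset ids
    totalhash = hashlib.sha1(b''.join(allhashes)).digest()
    shortnode = b'123456789abc'                    # short(node): 12 hex digits
    name = b'strip-backup/%s-%s-%s.hg' % (
        shortnode, totalhash[:4].hex().encode('ascii'), b'backup')
    # name is b'strip-backup/123456789abc-<8 hex digits>-backup.hg'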
|
88 | 89 | |
|
89 | 90 | def _collectfiles(repo, striprev): |
|
90 | 91 | """find out the filelogs affected by the strip""" |
|
91 | 92 | files = set() |
|
92 | 93 | |
|
93 | 94 | for x in pycompat.xrange(striprev, len(repo)): |
|
94 | 95 | files.update(repo[x].files()) |
|
95 | 96 | |
|
96 | 97 | return sorted(files) |
|
97 | 98 | |
|
98 | 99 | |
|
99 | 100 | def _collectrevlog(revlog, striprev): |
|
100 | 101 | _, brokenset = revlog.getstrippoint(striprev) |
|
101 | 102 | return [revlog.linkrev(r) for r in brokenset] |
|
102 | 103 | |
|
103 | 104 | |
|
104 | 105 | def _collectbrokencsets(repo, files, striprev): |
|
105 | 106 | """return the changesets which will be broken by the truncation""" |
|
106 | 107 | s = set() |
|
107 | 108 | |
|
108 | 109 | for revlog in manifestrevlogs(repo): |
|
109 | 110 | s.update(_collectrevlog(revlog, striprev)) |
|
110 | 111 | for fname in files: |
|
111 | 112 | s.update(_collectrevlog(repo.file(fname), striprev)) |
|
112 | 113 | |
|
113 | 114 | return s |
|
114 | 115 | |
|
115 | 116 | |
|
116 | 117 | def strip(ui, repo, nodelist, backup=True, topic=b'backup'): |
|
117 | 118 | # This function requires the caller to lock the repo, but it operates |
|
118 | 119 | # within a transaction of its own, and thus requires there to be no current |
|
119 | 120 | # transaction when it is called. |
|
120 | 121 | if repo.currenttransaction() is not None: |
|
121 | 122 | raise error.ProgrammingError(b'cannot strip from inside a transaction') |
|
122 | 123 | |
|
123 | 124 | # Simple way to maintain backwards compatibility for this |
|
124 | 125 | # argument. |
|
125 | 126 | if backup in [b'none', b'strip']: |
|
126 | 127 | backup = False |
|
127 | 128 | |
|
128 | 129 | repo = repo.unfiltered() |
|
129 | 130 | repo.destroying() |
|
130 | 131 | vfs = repo.vfs |
|
131 | 132 | # load bookmark before changelog to avoid side effect from outdated |
|
132 | 133 | # changelog (see repo._refreshchangelog) |
|
133 | 134 | repo._bookmarks |
|
134 | 135 | cl = repo.changelog |
|
135 | 136 | |
|
136 | 137 | # TODO handle undo of merge sets |
|
137 | 138 | if isinstance(nodelist, bytes): |
|
138 | 139 | nodelist = [nodelist] |
|
139 | 140 | striplist = [cl.rev(node) for node in nodelist] |
|
140 | 141 | striprev = min(striplist) |
|
141 | 142 | |
|
142 | 143 | files = _collectfiles(repo, striprev) |
|
143 | 144 | saverevs = _collectbrokencsets(repo, files, striprev) |
|
144 | 145 | |
|
145 | 146 | # Some revisions with rev > striprev may not be descendants of striprev. |
|
146 | 147 | # We have to find these revisions and put them in a bundle, so that |
|
147 | 148 | # we can restore them after the truncations. |
|
148 | 149 | # To create the bundle we use repo.changegroupsubset which requires |
|
149 | 150 | # the list of heads and bases of the set of interesting revisions. |
|
150 | 151 | # (head = revision in the set that has no descendant in the set; |
|
151 | 152 | # base = revision in the set that has no ancestor in the set) |
|
152 | 153 | tostrip = set(striplist) |
|
153 | 154 | saveheads = set(saverevs) |
|
154 | 155 | for r in cl.revs(start=striprev + 1): |
|
155 | 156 | if any(p in tostrip for p in cl.parentrevs(r)): |
|
156 | 157 | tostrip.add(r) |
|
157 | 158 | |
|
158 | 159 | if r not in tostrip: |
|
159 | 160 | saverevs.add(r) |
|
160 | 161 | saveheads.difference_update(cl.parentrevs(r)) |
|
161 | 162 | saveheads.add(r) |
|
162 | 163 | saveheads = [cl.node(r) for r in saveheads] |
|
163 | 164 | |
|
164 | 165 | # compute base nodes |
|
165 | 166 | if saverevs: |
|
166 | 167 | descendants = set(cl.descendants(saverevs)) |
|
167 | 168 | saverevs.difference_update(descendants) |
|
168 | 169 | savebases = [cl.node(r) for r in saverevs] |
|
169 | 170 | stripbases = [cl.node(r) for r in tostrip] |
|
170 | 171 | |
|
171 | 172 | stripobsidx = obsmarkers = () |
|
172 | 173 | if repo.ui.configbool(b'devel', b'strip-obsmarkers'): |
|
173 | 174 | obsmarkers = obsutil.exclusivemarkers(repo, stripbases) |
|
174 | 175 | if obsmarkers: |
|
175 | 176 | stripobsidx = [ |
|
176 | 177 | i for i, m in enumerate(repo.obsstore) if m in obsmarkers |
|
177 | 178 | ] |
|
178 | 179 | |
|
179 | 180 | newbmtarget, updatebm = _bookmarkmovements(repo, tostrip) |
|
180 | 181 | |
|
181 | 182 | backupfile = None |
|
182 | 183 | node = nodelist[-1] |
|
183 | 184 | if backup: |
|
184 | 185 | backupfile = _createstripbackup(repo, stripbases, node, topic) |
|
185 | 186 | # create a changegroup for all the branches we need to keep |
|
186 | 187 | tmpbundlefile = None |
|
187 | 188 | if saveheads: |
|
188 | 189 | # do not compress temporary bundle if we remove it from disk later |
|
189 | 190 | # |
|
190 | 191 | # We do not include obsolescence; it might re-introduce prune markers

191 | 192 | # we are trying to strip. This is harmless since the stripped markers

192 | 193 | # are already backed up and we did not touch the markers for the

193 | 194 | # saved changesets.
|
194 | 195 | tmpbundlefile = backupbundle( |
|
195 | 196 | repo, |
|
196 | 197 | savebases, |
|
197 | 198 | saveheads, |
|
198 | 199 | node, |
|
199 | 200 | b'temp', |
|
200 | 201 | compress=False, |
|
201 | 202 | obsolescence=False, |
|
202 | 203 | ) |
|
203 | 204 | |
|
204 | 205 | with ui.uninterruptible(): |
|
205 | 206 | try: |
|
206 | 207 | with repo.transaction(b"strip") as tr: |
|
207 | 208 | # TODO this code violates the interface abstraction of the |
|
208 | 209 | # transaction and makes assumptions that file storage is |
|
209 | 210 | # using append-only files. We'll need some kind of storage |
|
210 | 211 | # API to handle stripping for us. |
|
211 | 212 | offset = len(tr._entries) |
|
212 | 213 | |
|
213 | 214 | tr.startgroup() |
|
214 | 215 | cl.strip(striprev, tr) |
|
215 | 216 | stripmanifest(repo, striprev, tr, files) |
|
216 | 217 | |
|
217 | 218 | for fn in files: |
|
218 | 219 | repo.file(fn).strip(striprev, tr) |
|
219 | 220 | tr.endgroup() |
|
220 | 221 | |
|
221 | 222 | for i in pycompat.xrange(offset, len(tr._entries)): |
|
222 | 223 | file, troffset, ignore = tr._entries[i] |
|
223 | 224 | with repo.svfs(file, b'a', checkambig=True) as fp: |
|
224 | 225 | fp.truncate(troffset) |
|
225 | 226 | if troffset == 0: |
|
226 | 227 | repo.store.markremoved(file) |
|
227 | 228 | |
|
228 | 229 | deleteobsmarkers(repo.obsstore, stripobsidx) |
|
229 | 230 | del repo.obsstore |
|
230 | 231 | repo.invalidatevolatilesets() |
|
231 | 232 | repo._phasecache.filterunknown(repo) |
|
232 | 233 | |
|
233 | 234 | if tmpbundlefile: |
|
234 | 235 | ui.note(_(b"adding branch\n")) |
|
235 | 236 | f = vfs.open(tmpbundlefile, b"rb") |
|
236 | 237 | gen = exchange.readbundle(ui, f, tmpbundlefile, vfs) |
|
237 | 238 | if not repo.ui.verbose: |
|
238 | 239 | # silence internal shuffling chatter |
|
239 | 240 | repo.ui.pushbuffer() |
|
240 | 241 | tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile) |
|
241 | 242 | txnname = b'strip' |
|
242 | 243 | if not isinstance(gen, bundle2.unbundle20): |
|
243 | 244 | txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl) |
|
244 | 245 | with repo.transaction(txnname) as tr: |
|
245 | 246 | bundle2.applybundle( |
|
246 | 247 | repo, gen, tr, source=b'strip', url=tmpbundleurl |
|
247 | 248 | ) |
|
248 | 249 | if not repo.ui.verbose: |
|
249 | 250 | repo.ui.popbuffer() |
|
250 | 251 | f.close() |
|
251 | 252 | |
|
252 | 253 | with repo.transaction(b'repair') as tr: |
|
253 | 254 | bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] |
|
254 | 255 | repo._bookmarks.applychanges(repo, tr, bmchanges) |
|
255 | 256 | |
|
256 | 257 | # remove undo files |
|
257 | 258 | for undovfs, undofile in repo.undofiles(): |
|
258 | 259 | try: |
|
259 | 260 | undovfs.unlink(undofile) |
|
260 | 261 | except OSError as e: |
|
261 | 262 | if e.errno != errno.ENOENT: |
|
262 | 263 | ui.warn( |
|
263 | 264 | _(b'error removing %s: %s\n') |
|
264 | 265 | % ( |
|
265 | 266 | undovfs.join(undofile), |
|
266 | 267 | stringutil.forcebytestr(e), |
|
267 | 268 | ) |
|
268 | 269 | ) |
|
269 | 270 | |
|
270 | 271 | except: # re-raises |
|
271 | 272 | if backupfile: |
|
272 | 273 | ui.warn( |
|
273 | 274 | _(b"strip failed, backup bundle stored in '%s'\n") |
|
274 | 275 | % vfs.join(backupfile) |
|
275 | 276 | ) |
|
276 | 277 | if tmpbundlefile: |
|
277 | 278 | ui.warn( |
|
278 | 279 | _(b"strip failed, unrecovered changes stored in '%s'\n") |
|
279 | 280 | % vfs.join(tmpbundlefile) |
|
280 | 281 | ) |
|
281 | 282 | ui.warn( |
|
282 | 283 | _( |
|
283 | 284 | b"(fix the problem, then recover the changesets with " |
|
284 | 285 | b"\"hg unbundle '%s'\")\n" |
|
285 | 286 | ) |
|
286 | 287 | % vfs.join(tmpbundlefile) |
|
287 | 288 | ) |
|
288 | 289 | raise |
|
289 | 290 | else: |
|
290 | 291 | if tmpbundlefile: |
|
291 | 292 | # Remove temporary bundle only if there were no exceptions |
|
292 | 293 | vfs.unlink(tmpbundlefile) |
|
293 | 294 | |
|
294 | 295 | repo.destroyed() |
|
295 | 296 | # return the backup file path (or None if 'backup' was False) so |
|
296 | 297 | # extensions can use it |
|
297 | 298 | return backupfile |
|
298 | 299 | |
|
299 | 300 | |
|
300 | 301 | def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'): |
|
301 | 302 | """perform a "soft" strip using the archived phase""" |
|
302 | 303 | tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)] |
|
303 | 304 | if not tostrip: |
|
304 | 305 | return None |
|
305 | 306 | |
|
306 | 307 | newbmtarget, updatebm = _bookmarkmovements(repo, tostrip) |
|
307 | 308 | if backup: |
|
308 | 309 | node = tostrip[0] |
|
309 | 310 | backupfile = _createstripbackup(repo, tostrip, node, topic) |
|
310 | 311 | |
|
311 | 312 | with repo.transaction(b'strip') as tr: |
|
312 | 313 | phases.retractboundary(repo, tr, phases.archived, tostrip) |
|
313 | 314 | bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] |
|
314 | 315 | repo._bookmarks.applychanges(repo, tr, bmchanges) |
|
315 | 316 | return backupfile |
|
316 | 317 | |
|
317 | 318 | |
|
318 | 319 | def _bookmarkmovements(repo, tostrip): |
|
319 | 320 | # compute necessary bookmark movement |
|
320 | 321 | bm = repo._bookmarks |
|
321 | 322 | updatebm = [] |
|
322 | 323 | for m in bm: |
|
323 | 324 | rev = repo[bm[m]].rev() |
|
324 | 325 | if rev in tostrip: |
|
325 | 326 | updatebm.append(m) |
|
326 | 327 | newbmtarget = None |
|
327 | 328 | # If we need to move bookmarks, compute bookmark |
|
328 | 329 | # targets. Otherwise we can skip doing this logic. |
|
329 | 330 | if updatebm: |
|
330 | 331 | # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), |
|
331 | 332 | # but is much faster |
|
332 | 333 | newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip) |
|
333 | 334 | if newbmtarget: |
|
334 | 335 | newbmtarget = repo[newbmtarget.first()].node() |
|
335 | 336 | else: |
|
336 | 337 | newbmtarget = b'.' |
|
337 | 338 | return newbmtarget, updatebm |
|
338 | 339 | |
|
339 | 340 | |
|
340 | 341 | def _createstripbackup(repo, stripbases, node, topic): |
|
341 | 342 | # backup the changeset we are about to strip |
|
342 | 343 | vfs = repo.vfs |
|
343 | 344 | cl = repo.changelog |
|
344 | 345 | backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic) |
|
345 | 346 | repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile)) |
|
346 | 347 | repo.ui.log( |
|
347 | 348 | b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile) |
|
348 | 349 | ) |
|
349 | 350 | return backupfile |
|
350 | 351 | |
|
351 | 352 | |
|
352 | 353 | def safestriproots(ui, repo, nodes): |
|
353 | 354 | """return list of roots of nodes where descendants are covered by nodes""" |
|
354 | 355 | torev = repo.unfiltered().changelog.rev |
|
355 | 356 | revs = {torev(n) for n in nodes} |
|
356 | 357 | # tostrip = wanted - unsafe = wanted - ancestors(orphaned) |
|
357 | 358 | # orphaned = affected - wanted |
|
358 | 359 | # affected = descendants(roots(wanted)) |
|
359 | 360 | # wanted = revs |
|
360 | 361 | revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )' |
|
361 | 362 | tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs)) |
|
362 | 363 | notstrip = revs - tostrip |
|
363 | 364 | if notstrip: |
|
364 | 365 | nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip)) |
|
365 | 366 | ui.warn( |
|
366 | 367 | _(b'warning: orphaned descendants detected, not stripping %s\n') |
|
367 | 368 | % nodestr |
|
368 | 369 | ) |
|
369 | 370 | return [c.node() for c in repo.set(b'roots(%ld)', tostrip)] |
|
370 | 371 | |
|
371 | 372 | |
|
372 | 373 | class stripcallback(object): |
|
373 | 374 | """used as a transaction postclose callback""" |
|
374 | 375 | |
|
375 | 376 | def __init__(self, ui, repo, backup, topic): |
|
376 | 377 | self.ui = ui |
|
377 | 378 | self.repo = repo |
|
378 | 379 | self.backup = backup |
|
379 | 380 | self.topic = topic or b'backup' |
|
380 | 381 | self.nodelist = [] |
|
381 | 382 | |
|
382 | 383 | def addnodes(self, nodes): |
|
383 | 384 | self.nodelist.extend(nodes) |
|
384 | 385 | |
|
385 | 386 | def __call__(self, tr): |
|
386 | 387 | roots = safestriproots(self.ui, self.repo, self.nodelist) |
|
387 | 388 | if roots: |
|
388 | 389 | strip(self.ui, self.repo, roots, self.backup, self.topic) |
|
389 | 390 | |
|
390 | 391 | |
|
391 | 392 | def delayedstrip(ui, repo, nodelist, topic=None, backup=True): |
|
392 | 393 | """like strip, but works inside transaction and won't strip irreverent revs |
|
393 | 394 | |
|
394 | 395 | nodelist must explicitly contain all descendants. Otherwise a warning will |
|
395 | 396 | be printed that some nodes are not stripped. |
|
396 | 397 | |
|
397 | 398 | Will do a backup if `backup` is True. The last non-None "topic" will be |
|
398 | 399 | used as the backup topic name. The default backup topic name is "backup". |
|
399 | 400 | """ |
|
400 | 401 | tr = repo.currenttransaction() |
|
401 | 402 | if not tr: |
|
402 | 403 | nodes = safestriproots(ui, repo, nodelist) |
|
403 | 404 | return strip(ui, repo, nodes, backup=backup, topic=topic) |
|
404 | 405 | # transaction postclose callbacks are called in alphabetical order.
|
405 | 406 | # use '\xff' as prefix so we are likely to be called last. |
|
406 | 407 | callback = tr.getpostclose(b'\xffstrip') |
|
407 | 408 | if callback is None: |
|
408 | 409 | callback = stripcallback(ui, repo, backup=backup, topic=topic) |
|
409 | 410 | tr.addpostclose(b'\xffstrip', callback) |
|
410 | 411 | if topic: |
|
411 | 412 | callback.topic = topic |
|
412 | 413 | callback.addnodes(nodelist) |
|
413 | 414 | |
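
A tiny check of the ordering assumption behind the b'\xff' prefix; the other category names are invented:

    categories = [b'\x01bookmarks', b'phase', b'\xffstrip']
    assert sorted(categories)[-1] == b'\xffstrip'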
|
414 | 415 | |
|
415 | 416 | def stripmanifest(repo, striprev, tr, files): |
|
416 | 417 | for revlog in manifestrevlogs(repo): |
|
417 | 418 | revlog.strip(striprev, tr) |
|
418 | 419 | |
|
419 | 420 | |
|
420 | 421 | def manifestrevlogs(repo): |
|
421 | 422 | yield repo.manifestlog.getstorage(b'') |
|
422 | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: | |
|
423 | if scmutil.istreemanifest(repo): | |
|
423 | 424 | # This logic is safe if treemanifest isn't enabled, but also |
|
424 | 425 | # pointless, so we skip it if treemanifest isn't enabled. |
|
425 | 426 | for unencoded, encoded, size in repo.store.datafiles(): |
|
426 | 427 | if unencoded.startswith(b'meta/') and unencoded.endswith( |
|
427 | 428 | b'00manifest.i' |
|
428 | 429 | ): |
|
429 | 430 | dir = unencoded[5:-12] |
|
430 | 431 | yield repo.manifestlog.getstorage(dir) |
|
431 | 432 | |
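
The slice unencoded[5:-12] strips the leading b'meta/' and the trailing b'00manifest.i', leaving the tree directory with its trailing slash (Mercurial's internal convention for tree names); path invented:

    unencoded = b'meta/foo/bar/00manifest.i'
    assert unencoded[5:-12] == b'foo/bar/'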
|
432 | 433 | |
|
433 | 434 | def rebuildfncache(ui, repo): |
|
434 | 435 | """Rebuilds the fncache file from repo history. |
|
435 | 436 | |
|
436 | 437 | Missing entries will be added. Extra entries will be removed. |
|
437 | 438 | """ |
|
438 | 439 | repo = repo.unfiltered() |
|
439 | 440 | |
|
440 | 441 | if b'fncache' not in repo.requirements: |
|
441 | 442 | ui.warn( |
|
442 | 443 | _( |
|
443 | 444 | b'(not rebuilding fncache because repository does not ' |
|
444 | 445 | b'support fncache)\n' |
|
445 | 446 | ) |
|
446 | 447 | ) |
|
447 | 448 | return |
|
448 | 449 | |
|
449 | 450 | with repo.lock(): |
|
450 | 451 | fnc = repo.store.fncache |
|
451 | 452 | fnc.ensureloaded(warn=ui.warn) |
|
452 | 453 | |
|
453 | 454 | oldentries = set(fnc.entries) |
|
454 | 455 | newentries = set() |
|
455 | 456 | seenfiles = set() |
|
456 | 457 | |
|
457 | 458 | progress = ui.makeprogress( |
|
458 | 459 | _(b'rebuilding'), unit=_(b'changesets'), total=len(repo) |
|
459 | 460 | ) |
|
460 | 461 | for rev in repo: |
|
461 | 462 | progress.update(rev) |
|
462 | 463 | |
|
463 | 464 | ctx = repo[rev] |
|
464 | 465 | for f in ctx.files(): |
|
465 | 466 | # This is to minimize I/O. |
|
466 | 467 | if f in seenfiles: |
|
467 | 468 | continue |
|
468 | 469 | seenfiles.add(f) |
|
469 | 470 | |
|
470 | 471 | i = b'data/%s.i' % f |
|
471 | 472 | d = b'data/%s.d' % f |
|
472 | 473 | |
|
473 | 474 | if repo.store._exists(i): |
|
474 | 475 | newentries.add(i) |
|
475 | 476 | if repo.store._exists(d): |
|
476 | 477 | newentries.add(d) |
|
477 | 478 | |
|
478 | 479 | progress.complete() |
|
479 | 480 | |
|
480 | 481 | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: |
|
481 | 482 | # This logic is safe if treemanifest isn't enabled, but also |
|
482 | 483 | # pointless, so we skip it if treemanifest isn't enabled. |
|
483 | 484 | for dir in pathutil.dirs(seenfiles): |
|
484 | 485 | i = b'meta/%s/00manifest.i' % dir |
|
485 | 486 | d = b'meta/%s/00manifest.d' % dir |
|
486 | 487 | |
|
487 | 488 | if repo.store._exists(i): |
|
488 | 489 | newentries.add(i) |
|
489 | 490 | if repo.store._exists(d): |
|
490 | 491 | newentries.add(d) |
|
491 | 492 | |
|
492 | 493 | addcount = len(newentries - oldentries) |
|
493 | 494 | removecount = len(oldentries - newentries) |
|
494 | 495 | for p in sorted(oldentries - newentries): |
|
495 | 496 | ui.write(_(b'removing %s\n') % p) |
|
496 | 497 | for p in sorted(newentries - oldentries): |
|
497 | 498 | ui.write(_(b'adding %s\n') % p) |
|
498 | 499 | |
|
499 | 500 | if addcount or removecount: |
|
500 | 501 | ui.write( |
|
501 | 502 | _(b'%d items added, %d removed from fncache\n') |
|
502 | 503 | % (addcount, removecount) |
|
503 | 504 | ) |
|
504 | 505 | fnc.entries = newentries |
|
505 | 506 | fnc._dirty = True |
|
506 | 507 | |
|
507 | 508 | with repo.transaction(b'fncache') as tr: |
|
508 | 509 | fnc.write(tr) |
|
509 | 510 | else: |
|
510 | 511 | ui.write(_(b'fncache already up to date\n')) |
|
511 | 512 | |
|
512 | 513 | |
|
513 | 514 | def deleteobsmarkers(obsstore, indices): |
|
514 | 515 | """Delete some obsmarkers from obsstore and return how many were deleted |
|
515 | 516 | |
|
516 | 517 | 'indices' is a list of ints which are the indices |
|
517 | 518 | of the markers to be deleted. |
|
518 | 519 | |
|
519 | 520 | Every invocation of this function completely rewrites the obsstore file, |
|
520 | 521 | skipping the markers we want to be removed. The new temporary file is |
|
521 | 522 | created, remaining markers are written there and on .close() this file |
|
522 | 523 | gets atomically renamed to obsstore, thus guaranteeing consistency.""" |
|
523 | 524 | if not indices: |
|
524 | 525 | # we don't want to rewrite the obsstore with the same content |
|
525 | 526 | return |
|
526 | 527 | |
|
527 | 528 | left = [] |
|
528 | 529 | current = obsstore._all |
|
529 | 530 | n = 0 |
|
530 | 531 | for i, m in enumerate(current): |
|
531 | 532 | if i in indices: |
|
532 | 533 | n += 1 |
|
533 | 534 | continue |
|
534 | 535 | left.append(m) |
|
535 | 536 | |
|
536 | 537 | newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True) |
|
537 | 538 | for bytes in obsolete.encodemarkers(left, True, obsstore._version): |
|
538 | 539 | newobsstorefile.write(bytes) |
|
539 | 540 | newobsstorefile.close() |
|
540 | 541 | return n |
|