wireprotov1peer: don't raise internal errors in some cases...
Valentin Gatien-Baron
r47430:aa2e3814 default
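
The part of this change that lands in mercurial/scmutil.py teaches the error.ResponseError handler in callcatch() to cope with a response error whose message is None: previously a None message fell through to the %r branch, while the new code prints a bare newline after the abort prefix. A condensed, self-contained sketch of the new dispatch (the report() helper and FakeUI class are stand-ins, not Mercurial's real ui API; the pycompat.sysbytes()/stringutil.ellipsis() steps are elided):

import sys


class FakeUI(object):
    def error(self, msg):
        sys.stderr.write(msg.decode('utf-8', 'replace'))


def report(ui, args):
    # condensed from the new error.ResponseError branch below
    ui.error(b"abort: %s" % args[0])
    msg = args[1]
    if msg is None:  # the case this change adds
        ui.error(b"\n")
    elif not isinstance(msg, bytes):
        ui.error(b" %r\n" % (msg,))
    elif not msg:
        ui.error(b" empty string\n")
    else:
        ui.error(b"\n%r\n" % msg)


report(FakeUI(), (b'unexpected response', None))  # -> "abort: unexpected response\n"
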
@@ -1,2329 +1,2331
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    requirements as requirementsmod,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)

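Since __iter__ yields the seven lists in declaration order, a status struct unpacks directly; a small usage sketch, assuming an existing localrepository object repo (repo.status() returns this class):

def count_dirty(repo):
    modified, added, removed, deleted, unknown, ignored, clean = repo.status()
    return len(modified) + len(added) + len(removed)
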
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        detailed_exit_code = 100
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
-        if not isinstance(msg, bytes):
+        if msg is None:
+            ui.error(b"\n")
+        elif not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        detailed_exit_code = 50
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        detailed_exit_code = 240
        coarse_exit_code = 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        if isinstance(inst, (error.InputError, error.ParseError)):
            detailed_exit_code = 10
        elif isinstance(inst, error.StateError):
            detailed_exit_code = 20
        elif isinstance(inst, error.ConfigError):
            detailed_exit_code = 30
        elif isinstance(inst, error.HookAbort):
            detailed_exit_code = 40
        elif isinstance(inst, error.SecurityError):
            detailed_exit_code = 150
        elif isinstance(inst, error.CanceledError):
            detailed_exit_code = 250
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code

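A minimal sketch of driving callcatch() directly (Mercurial's dispatch layer normally does this). With ui.detailed-exit-code enabled it returns the detailed codes assigned above (10 input/parse, 20 state or lock, 30 config, 40 hook, 50 storage, 100 remote, 150 security, 240 intervention, 250 canceled, 254 SystemExit); otherwise the coarse code:

def run_guarded(func):
    # func is any zero-argument callable returning an exit code
    from mercurial import ui as uimod

    u = uimod.ui.load()
    return callcatch(u, func)
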
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.InputError(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )


def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.InputError(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = cl._filteredrevs_hashcache.get(maxrev)
    if not key:
        revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            key = s.digest()
            cl._filteredrevs_hashcache[maxrev] = key
    return key


def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False


def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()

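A usage sketch for the helper above, sharing one cache dict across calls so the disambiguation revset and nodetree are computed only once (repo and nodes are assumed to exist):

def shortids(repo, nodes, cache=None):
    if cache is None:
        cache = {}  # reused across the whole batch
    return [
        shortesthexnodeidprefix(repo, n, minlength=4, cache=cache)
        for n in nodes
    ]
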
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)

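A sketch contrasting revsymbol() with revsingle() (defined below): the former takes only a plain symbol, the latter accepts full revset expressions (repo is assumed to exist):

def lookup_examples(repo):
    tip = revsymbol(repo, b'tip')  # single symbol: ok
    pub = revsingle(repo, b'max(public())')  # revset: needs revsingle()
    return tip, pub
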
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

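A sketch of the pattern the docstring above recommends: expand caller-supplied arguments with revsetlang.formatspec() before handing the spec to revrange() (repo and filename are assumed to exist):

def revs_touching(repo, filename):
    spec = revsetlang.formatspec(b'file(%s)', filename)  # safely quoted
    return revrange(repo, [spec])
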
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2


def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()

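A sketch of consuming the windowed walk above; increasingwindows() makes the batches grow 8, 16, 32, ... up to 512. repo and revs (a smartset, e.g. from revrange()) are assumed to exist:

def print_revs(repo, revs):
    def makefilematcher(ctx):
        return matchall(repo)

    def prepare(ctx, fmatch):
        pass  # per-window data gathering happens here, in forward order

    for ctx in walkchangerevs(repo, revs, makefilematcher, prepare):
        repo.ui.write(b'%d:%s\n' % (ctx.rev(), short(ctx.node())))
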
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
858 """Return a function that produced paths for presenting to the user.
860 """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    """Expand bare globs when running on windows.
    On posix we assume it has already been done by sh."""
914 if not util.expandglobs:
916 if not util.expandglobs:
915 return list(pats)
917 return list(pats)
916 ret = []
918 ret = []
917 for kindpat in pats:
919 for kindpat in pats:
918 kind, pat = matchmod._patsplit(kindpat, None)
920 kind, pat = matchmod._patsplit(kindpat, None)
919 if kind is None:
921 if kind is None:
920 try:
922 try:
921 globbed = glob.glob(pat)
923 globbed = glob.glob(pat)
922 except re.error:
924 except re.error:
923 globbed = [pat]
925 globbed = [pat]
924 if globbed:
926 if globbed:
925 ret.extend(globbed)
927 ret.extend(globbed)
926 continue
928 continue
927 ret.append(kindpat)
929 ret.append(kindpat)
928 return ret
930 return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
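

# Usage sketch (hypothetical patterns/options):
#
#     m = match(ctx, pats=[b'glob:*.py'], opts={b'include': [b'src/**']})
#     m(b'src/a.py')  # -> True or False; nonexistent explicit files are
#                     # reported through the (default or provided) badfn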


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
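

# Configuration sketch (hypothetical path): with
#
#     [ui]
#     origbackuppath = .hg/origbackups
#
# in the hgrc, backuppath(ui, repo, b'dir/f') resolves to an absolute path
# under .hg/origbackups/dir/f rather than the default dir/f.orig next to the
# working copy file.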


class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do
    not have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in the
    obsmarker if obsolescence is enabled.
1077 """
1079 """
1078 assert fixphase or targetphase is None
1080 assert fixphase or targetphase is None
1079 if not replacements and not moves:
1081 if not replacements and not moves:
1080 return
1082 return
1081
1083
1082 # translate mapping's other forms
1084 # translate mapping's other forms
1083 if not util.safehasattr(replacements, b'items'):
1085 if not util.safehasattr(replacements, b'items'):
1084 replacements = {(n,): () for n in replacements}
1086 replacements = {(n,): () for n in replacements}
1085 else:
1087 else:
1086 # upgrading non tuple "source" to tuple ones for BC
1088 # upgrading non tuple "source" to tuple ones for BC
1087 repls = {}
1089 repls = {}
1088 for key, value in replacements.items():
1090 for key, value in replacements.items():
1089 if not isinstance(key, tuple):
1091 if not isinstance(key, tuple):
1090 key = (key,)
1092 key = (key,)
1091 repls[key] = value
1093 repls[key] = value
1092 replacements = repls
1094 replacements = repls
1093
1095
1094 # Unfiltered repo is needed since nodes in replacements might be hidden.
1096 # Unfiltered repo is needed since nodes in replacements might be hidden.
1095 unfi = repo.unfiltered()
1097 unfi = repo.unfiltered()
1096
1098
1097 # Calculate bookmark movements
1099 # Calculate bookmark movements
1098 if moves is None:
1100 if moves is None:
1099 moves = {}
1101 moves = {}
1100 for oldnodes, newnodes in replacements.items():
1102 for oldnodes, newnodes in replacements.items():
1101 for oldnode in oldnodes:
1103 for oldnode in oldnodes:
1102 if oldnode in moves:
1104 if oldnode in moves:
1103 continue
1105 continue
1104 if len(newnodes) > 1:
1106 if len(newnodes) > 1:
1105 # usually a split, take the one with biggest rev number
1107 # usually a split, take the one with biggest rev number
1106 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1108 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1107 elif len(newnodes) == 0:
1109 elif len(newnodes) == 0:
1108 # move bookmark backwards
1110 # move bookmark backwards
1109 allreplaced = []
1111 allreplaced = []
1110 for rep in replacements:
1112 for rep in replacements:
1111 allreplaced.extend(rep)
1113 allreplaced.extend(rep)
1112 roots = list(
1114 roots = list(
1113 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1115 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1114 )
1116 )
1115 if roots:
1117 if roots:
1116 newnode = roots[0].node()
1118 newnode = roots[0].node()
1117 else:
1119 else:
1118 newnode = nullid
1120 newnode = nullid
1119 else:
1121 else:
1120 newnode = newnodes[0]
1122 newnode = newnodes[0]
1121 moves[oldnode] = newnode
1123 moves[oldnode] = newnode
1122
1124
1123 allnewnodes = [n for ns in replacements.values() for n in ns]
1125 allnewnodes = [n for ns in replacements.values() for n in ns]
1124 toretract = {}
1126 toretract = {}
1125 toadvance = {}
1127 toadvance = {}
1126 if fixphase:
1128 if fixphase:
1127 precursors = {}
1129 precursors = {}
1128 for oldnodes, newnodes in replacements.items():
1130 for oldnodes, newnodes in replacements.items():
1129 for oldnode in oldnodes:
1131 for oldnode in oldnodes:
1130 for newnode in newnodes:
1132 for newnode in newnodes:
1131 precursors.setdefault(newnode, []).append(oldnode)
1133 precursors.setdefault(newnode, []).append(oldnode)
1132
1134
1133 allnewnodes.sort(key=lambda n: unfi[n].rev())
1135 allnewnodes.sort(key=lambda n: unfi[n].rev())
1134 newphases = {}
1136 newphases = {}
1135
1137
1136 def phase(ctx):
1138 def phase(ctx):
1137 return newphases.get(ctx.node(), ctx.phase())
1139 return newphases.get(ctx.node(), ctx.phase())
1138
1140
1139 for newnode in allnewnodes:
1141 for newnode in allnewnodes:
1140 ctx = unfi[newnode]
1142 ctx = unfi[newnode]
1141 parentphase = max(phase(p) for p in ctx.parents())
1143 parentphase = max(phase(p) for p in ctx.parents())
1142 if targetphase is None:
1144 if targetphase is None:
1143 oldphase = max(
1145 oldphase = max(
1144 unfi[oldnode].phase() for oldnode in precursors[newnode]
1146 unfi[oldnode].phase() for oldnode in precursors[newnode]
1145 )
1147 )
1146 newphase = max(oldphase, parentphase)
1148 newphase = max(oldphase, parentphase)
1147 else:
1149 else:
1148 newphase = max(targetphase, parentphase)
1150 newphase = max(targetphase, parentphase)
1149 newphases[newnode] = newphase
1151 newphases[newnode] = newphase
1150 if newphase > ctx.phase():
1152 if newphase > ctx.phase():
1151 toretract.setdefault(newphase, []).append(newnode)
1153 toretract.setdefault(newphase, []).append(newnode)
1152 elif newphase < ctx.phase():
1154 elif newphase < ctx.phase():
1153 toadvance.setdefault(newphase, []).append(newnode)
1155 toadvance.setdefault(newphase, []).append(newnode)
1154
1156
1155 with repo.transaction(b'cleanup') as tr:
1157 with repo.transaction(b'cleanup') as tr:
1156 # Move bookmarks
1158 # Move bookmarks
1157 bmarks = repo._bookmarks
1159 bmarks = repo._bookmarks
1158 bmarkchanges = []
1160 bmarkchanges = []
1159 for oldnode, newnode in moves.items():
1161 for oldnode, newnode in moves.items():
1160 oldbmarks = repo.nodebookmarks(oldnode)
1162 oldbmarks = repo.nodebookmarks(oldnode)
1161 if not oldbmarks:
1163 if not oldbmarks:
1162 continue
1164 continue
1163 from . import bookmarks # avoid import cycle
1165 from . import bookmarks # avoid import cycle
1164
1166
1165 repo.ui.debug(
1167 repo.ui.debug(
1166 b'moving bookmarks %r from %s to %s\n'
1168 b'moving bookmarks %r from %s to %s\n'
1167 % (
1169 % (
1168 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1170 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1169 hex(oldnode),
1171 hex(oldnode),
1170 hex(newnode),
1172 hex(newnode),
1171 )
1173 )
1172 )
1174 )
1173 # Delete divergent bookmarks being parents of related newnodes
1175 # Delete divergent bookmarks being parents of related newnodes
1174 deleterevs = repo.revs(
1176 deleterevs = repo.revs(
1175 b'parents(roots(%ln & (::%n))) - parents(%n)',
1177 b'parents(roots(%ln & (::%n))) - parents(%n)',
1176 allnewnodes,
1178 allnewnodes,
1177 newnode,
1179 newnode,
1178 oldnode,
1180 oldnode,
1179 )
1181 )
1180 deletenodes = _containsnode(repo, deleterevs)
1182 deletenodes = _containsnode(repo, deleterevs)
1181 for name in oldbmarks:
1183 for name in oldbmarks:
1182 bmarkchanges.append((name, newnode))
1184 bmarkchanges.append((name, newnode))
1183 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1185 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1184 bmarkchanges.append((b, None))
1186 bmarkchanges.append((b, None))
1185
1187
1186 if bmarkchanges:
1188 if bmarkchanges:
1187 bmarks.applychanges(repo, tr, bmarkchanges)
1189 bmarks.applychanges(repo, tr, bmarkchanges)
1188
1190
1189 for phase, nodes in toretract.items():
1191 for phase, nodes in toretract.items():
1190 phases.retractboundary(repo, tr, phase, nodes)
1192 phases.retractboundary(repo, tr, phase, nodes)
1191 for phase, nodes in toadvance.items():
1193 for phase, nodes in toadvance.items():
1192 phases.advanceboundary(repo, tr, phase, nodes)
1194 phases.advanceboundary(repo, tr, phase, nodes)
1193
1195
1194 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1196 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1195 # Obsolete or strip nodes
1197 # Obsolete or strip nodes
1196 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1198 if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, which might be
            # useful for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )
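

# Shapes accepted for cleanupnodes()'s 'replacements' argument (sketch with
# hypothetical node values):
#
#     cleanupnodes(repo, {old: [new]}, b'rebase')      # plain 1:1 mapping
#     cleanupnodes(repo, {(o1, o2): (new,)}, b'fold')  # tuple keys (folds)
#     cleanupnodes(repo, [old1, old2], b'prune')       # iterable: no
#                                                      # successors at all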


def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
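

# Summary sketch of the classification above (dirstate state vs. whether the
# file still exists on disk):
#
#     b'?' and passes the path audit  -> unknown
#     not b'r' and missing on disk    -> deleted
#     b'r' and present on disk        -> forgotten
#     b'r' and missing on disk        -> removed (used for rename detection)
#     b'a'                            -> added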


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fall back to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
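

# Usage sketch (hypothetical file/revision): both implementations above
# return a callable with the same contract:
#
#     getrenamed = getrenamedfn(repo)
#     getrenamed(b'copied.txt', 42)  # -> source path (bytes) or None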


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: requirements which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None
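

# Behaviour sketch: the split only happens when share-safe is in use;
# otherwise everything stays in .hg/requires:
#
#     wcreq, storereq = filterrequirements(repo.requirements)
#     # share-safe:    wcreq -> .hg/requires, storereq -> .hg/store/requires
#     # no share-safe: (requirements, None), i.e. everything in .hg/requires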


def istreemanifest(repo):
    """returns whether the repository is using treemanifest or not"""
    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements


def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
1677 """A property like decorator that tracks files under .hg/ for updates.
1679 """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # a function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
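

# A minimal subclass sketch (hypothetical names) following the docstring
# above: join() resolves tracked paths against the owning object, and the
# decorated method is recomputed only when a tracked file changes:
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class somerepo(object):
#         def __init__(self, vfs):
#             self.vfs = vfs
#             self._filecache = {}
#
#         @repofilecache(b'bookmarks')
#         def bookmarks(self):
#             return self.vfs.read(b'bookmarks')  # read lazily, then cached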


def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # got a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
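

# Configuration sketch (hypothetical source name and command):
#
#     [extdata]
#     bugzilla = shell:python buglist.py
#
# extdatasource(repo, b'bugzilla') then returns the parsed {rev: value} map;
# consumers such as the extdata() revset build on this helper.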


class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1879
1881
1880
1882
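# A minimal usage sketch (illustrative only: 'ui' and 'updatebar' come from
# the calling code, and the topic, unit and total below are made up):
#
#   with progress(ui, updatebar, b'scanning', unit=b'files', total=250) as p:
#       for fname in files:
#           p.increment(item=fname)
#   # leaving the 'with' block calls complete(), which clears the bar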
1881 def gdinitconfig(ui):
1883 def gdinitconfig(ui):
1882 """helper function to know if a repo should be created as general delta"""
1884 """helper function to know if a repo should be created as general delta"""
1883 # experimental config: format.generaldelta
1885 # experimental config: format.generaldelta
1884 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1886 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1885 b'format', b'usegeneraldelta'
1887 b'format', b'usegeneraldelta'
1886 )
1888 )
1887
1889
1888
1890
1889 def gddeltaconfig(ui):
1891 def gddeltaconfig(ui):
1890 """helper function to know if incoming delta should be optimised"""
1892 """helper function to know if incoming delta should be optimised"""
1891 # experimental config: format.generaldelta
1893 # experimental config: format.generaldelta
1892 return ui.configbool(b'format', b'generaldelta')
1894 return ui.configbool(b'format', b'generaldelta')
1893
1895
1894
1896
1895 class simplekeyvaluefile(object):
1897 class simplekeyvaluefile(object):
1896 """A simple file with key=value lines
1898 """A simple file with key=value lines
1897
1899
1898 Keys must be alphanumeric and start with a letter; values must not
1900 Keys must be alphanumeric and start with a letter; values must not
1899 contain '\n' characters"""
1901 contain '\n' characters"""
1900
1902
1901 firstlinekey = b'__firstline'
1903 firstlinekey = b'__firstline'
1902
1904
1903 def __init__(self, vfs, path, keys=None):
1905 def __init__(self, vfs, path, keys=None):
1904 self.vfs = vfs
1906 self.vfs = vfs
1905 self.path = path
1907 self.path = path
1906
1908
1907 def read(self, firstlinenonkeyval=False):
1909 def read(self, firstlinenonkeyval=False):
1908 """Read the contents of a simple key-value file
1910 """Read the contents of a simple key-value file
1909
1911
1910 'firstlinenonkeyval' indicates whether the first line of the file should
1912 'firstlinenonkeyval' indicates whether the first line of the file should
1911 be treated as a key-value pair or returned fully under the
1913 be treated as a key-value pair or returned fully under the
1912 __firstline key."""
1914 __firstline key."""
1913 lines = self.vfs.readlines(self.path)
1915 lines = self.vfs.readlines(self.path)
1914 d = {}
1916 d = {}
1915 if firstlinenonkeyval:
1917 if firstlinenonkeyval:
1916 if not lines:
1918 if not lines:
1917 e = _(b"empty simplekeyvalue file")
1919 e = _(b"empty simplekeyvalue file")
1918 raise error.CorruptedState(e)
1920 raise error.CorruptedState(e)
1919 # we don't want to include '\n' in the __firstline
1921 # we don't want to include '\n' in the __firstline
1920 d[self.firstlinekey] = lines[0][:-1]
1922 d[self.firstlinekey] = lines[0][:-1]
1921 del lines[0]
1923 del lines[0]
1922
1924
1923 try:
1925 try:
1924 # the 'if line.strip()' part prevents us from failing on empty
1926 # the 'if line.strip()' part prevents us from failing on empty
1925 # lines which only contain '\n' and therefore are not skipped
1927 # lines which only contain '\n' and therefore are not skipped
1926 # by 'if line'
1928 # by 'if line'
1927 updatedict = dict(
1929 updatedict = dict(
1928 line[:-1].split(b'=', 1) for line in lines if line.strip()
1930 line[:-1].split(b'=', 1) for line in lines if line.strip()
1929 )
1931 )
1930 if self.firstlinekey in updatedict:
1932 if self.firstlinekey in updatedict:
1931 e = _(b"%r can't be used as a key")
1933 e = _(b"%r can't be used as a key")
1932 raise error.CorruptedState(e % self.firstlinekey)
1934 raise error.CorruptedState(e % self.firstlinekey)
1933 d.update(updatedict)
1935 d.update(updatedict)
1934 except ValueError as e:
1936 except ValueError as e:
1935 raise error.CorruptedState(stringutil.forcebytestr(e))
1937 raise error.CorruptedState(stringutil.forcebytestr(e))
1936 return d
1938 return d
1937
1939
1938 def write(self, data, firstline=None):
1940 def write(self, data, firstline=None):
1939 """Write key=>value mapping to a file
1941 """Write key=>value mapping to a file
1940 data is a dict. Keys must be alphanumeric and start with a letter.
1942 data is a dict. Keys must be alphanumeric and start with a letter.
1941 Values must not contain newline characters.
1943 Values must not contain newline characters.
1942
1944
1943 If 'firstline' is not None, it is written to the file before
1945 If 'firstline' is not None, it is written to the file before
1944 everything else, as-is, not in key=value form"""
1946 everything else, as-is, not in key=value form"""
1945 lines = []
1947 lines = []
1946 if firstline is not None:
1948 if firstline is not None:
1947 lines.append(b'%s\n' % firstline)
1949 lines.append(b'%s\n' % firstline)
1948
1950
1949 for k, v in data.items():
1951 for k, v in data.items():
1950 if k == self.firstlinekey:
1952 if k == self.firstlinekey:
1951 e = b"key name '%s' is reserved" % self.firstlinekey
1953 e = b"key name '%s' is reserved" % self.firstlinekey
1952 raise error.ProgrammingError(e)
1954 raise error.ProgrammingError(e)
1953 if not k[0:1].isalpha():
1955 if not k[0:1].isalpha():
1954 e = b"keys must start with a letter in a key-value file"
1956 e = b"keys must start with a letter in a key-value file"
1955 raise error.ProgrammingError(e)
1957 raise error.ProgrammingError(e)
1956 if not k.isalnum():
1958 if not k.isalnum():
1957 e = b"invalid key name in a simple key-value file"
1959 e = b"invalid key name in a simple key-value file"
1958 raise error.ProgrammingError(e)
1960 raise error.ProgrammingError(e)
1959 if b'\n' in v:
1961 if b'\n' in v:
1960 e = b"invalid value in a simple key-value file"
1962 e = b"invalid value in a simple key-value file"
1961 raise error.ProgrammingError(e)
1963 raise error.ProgrammingError(e)
1962 lines.append(b"%s=%s\n" % (k, v))
1964 lines.append(b"%s=%s\n" % (k, v))
1963 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1965 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1964 fp.write(b''.join(lines))
1966 fp.write(b''.join(lines))
1965
1967
1966
1968
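# A usage sketch, assuming a vfs such as repo.vfs; the file name b'state'
# is hypothetical:
#
#   skv = simplekeyvaluefile(repo.vfs, b'state')
#   skv.write({b'version': b'1'}, firstline=b'header line')
#   skv.read(firstlinenonkeyval=True)
#   # -> {b'__firstline': b'header line', b'version': b'1'}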
1967 _reportobsoletedsource = [
1969 _reportobsoletedsource = [
1968 b'debugobsolete',
1970 b'debugobsolete',
1969 b'pull',
1971 b'pull',
1970 b'push',
1972 b'push',
1971 b'serve',
1973 b'serve',
1972 b'unbundle',
1974 b'unbundle',
1973 ]
1975 ]
1974
1976
1975 _reportnewcssource = [
1977 _reportnewcssource = [
1976 b'pull',
1978 b'pull',
1977 b'unbundle',
1979 b'unbundle',
1978 ]
1980 ]
1979
1981
1980
1982
1981 def prefetchfiles(repo, revmatches):
1983 def prefetchfiles(repo, revmatches):
1982 """Invokes the registered file prefetch functions, allowing extensions to
1984 """Invokes the registered file prefetch functions, allowing extensions to
1983 ensure the corresponding files are available locally, before the command
1985 ensure the corresponding files are available locally, before the command
1984 uses them.
1986 uses them.
1985
1987
1986 Args:
1988 Args:
1987 revmatches: a list of (revision, match) tuples to indicate the files to
1989 revmatches: a list of (revision, match) tuples to indicate the files to
1988 fetch at each revision. If any of the match elements is None, it matches
1990 fetch at each revision. If any of the match elements is None, it matches
1989 all files.
1991 all files.
1990 """
1992 """
1991
1993
1992 def _matcher(m):
1994 def _matcher(m):
1993 if m:
1995 if m:
1994 assert isinstance(m, matchmod.basematcher)
1996 assert isinstance(m, matchmod.basematcher)
1995 # The command itself will complain about files that don't exist, so
1997 # The command itself will complain about files that don't exist, so
1996 # don't duplicate the message.
1998 # don't duplicate the message.
1997 return matchmod.badmatch(m, lambda fn, msg: None)
1999 return matchmod.badmatch(m, lambda fn, msg: None)
1998 else:
2000 else:
1999 return matchall(repo)
2001 return matchall(repo)
2000
2002
2001 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
2003 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
2002
2004
2003 fileprefetchhooks(repo, revbadmatches)
2005 fileprefetchhooks(repo, revbadmatches)
2004
2006
2005
2007
2006 # a list of (repo, revs, match) prefetch functions
2008 # a list of (repo, revs, match) prefetch functions
2007 fileprefetchhooks = util.hooks()
2009 fileprefetchhooks = util.hooks()
2008
2010
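# Sketch of how an extension might register a prefetch function; the name
# 'myprefetch' is hypothetical, and each registered function receives the
# (rev, match) pairs built by prefetchfiles() above:
#
#   def myprefetch(repo, revmatches):
#       for rev, match in revmatches:
#           pass  # fetch the files selected by 'match' at 'rev'
#
#   fileprefetchhooks.add(b'myext', myprefetch)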
2009 # A marker that tells the evolve extension to suppress its own reporting
2011 # A marker that tells the evolve extension to suppress its own reporting
2010 _reportstroubledchangesets = True
2012 _reportstroubledchangesets = True
2011
2013
2012
2014
2013 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
2015 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
2014 """register a callback to issue a summary after the transaction is closed
2016 """register a callback to issue a summary after the transaction is closed
2015
2017
2016 If as_validator is true, then the callbacks are registered as transaction
2018 If as_validator is true, then the callbacks are registered as transaction
2017 validators instead.
2019 validators instead.
2018 """
2020 """
2019
2021
2020 def txmatch(sources):
2022 def txmatch(sources):
2021 return any(txnname.startswith(source) for source in sources)
2023 return any(txnname.startswith(source) for source in sources)
2022
2024
2023 categories = []
2025 categories = []
2024
2026
2025 def reportsummary(func):
2027 def reportsummary(func):
2026 """decorator for report callbacks."""
2028 """decorator for report callbacks."""
2027 # The repoview life cycle is shorter than the one of the actual
2029 # The repoview life cycle is shorter than the one of the actual
2028 # underlying repository. So the filtered object can die before the
2030 # underlying repository. So the filtered object can die before the
2029 # weakref is used, leading to trouble. We keep a reference to the
2031 # weakref is used, leading to trouble. We keep a reference to the
2030 # unfiltered object and restore the filtering when retrieving the
2032 # unfiltered object and restore the filtering when retrieving the
2031 # repository through the weakref.
2033 # repository through the weakref.
2032 filtername = repo.filtername
2034 filtername = repo.filtername
2033 reporef = weakref.ref(repo.unfiltered())
2035 reporef = weakref.ref(repo.unfiltered())
2034
2036
2035 def wrapped(tr):
2037 def wrapped(tr):
2036 repo = reporef()
2038 repo = reporef()
2037 if filtername:
2039 if filtername:
2038 assert repo is not None # help pytype
2040 assert repo is not None # help pytype
2039 repo = repo.filtered(filtername)
2041 repo = repo.filtered(filtername)
2040 func(repo, tr)
2042 func(repo, tr)
2041
2043
2042 newcat = b'%02i-txnreport' % len(categories)
2044 newcat = b'%02i-txnreport' % len(categories)
2043 if as_validator:
2045 if as_validator:
2044 otr.addvalidator(newcat, wrapped)
2046 otr.addvalidator(newcat, wrapped)
2045 else:
2047 else:
2046 otr.addpostclose(newcat, wrapped)
2048 otr.addpostclose(newcat, wrapped)
2047 categories.append(newcat)
2049 categories.append(newcat)
2048 return wrapped
2050 return wrapped
2049
2051
2050 @reportsummary
2052 @reportsummary
2051 def reportchangegroup(repo, tr):
2053 def reportchangegroup(repo, tr):
2052 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2054 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2053 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2055 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2054 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2056 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2055 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2057 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2056 if cgchangesets or cgrevisions or cgfiles:
2058 if cgchangesets or cgrevisions or cgfiles:
2057 htext = b""
2059 htext = b""
2058 if cgheads:
2060 if cgheads:
2059 htext = _(b" (%+d heads)") % cgheads
2061 htext = _(b" (%+d heads)") % cgheads
2060 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2062 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2061 if as_validator:
2063 if as_validator:
2062 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2064 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2063 assert repo is not None # help pytype
2065 assert repo is not None # help pytype
2064 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2066 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2065
2067
2066 if txmatch(_reportobsoletedsource):
2068 if txmatch(_reportobsoletedsource):
2067
2069
2068 @reportsummary
2070 @reportsummary
2069 def reportobsoleted(repo, tr):
2071 def reportobsoleted(repo, tr):
2070 obsoleted = obsutil.getobsoleted(repo, tr)
2072 obsoleted = obsutil.getobsoleted(repo, tr)
2071 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2073 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2072 if newmarkers:
2074 if newmarkers:
2073 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2075 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2074 if obsoleted:
2076 if obsoleted:
2075 msg = _(b'obsoleted %i changesets\n')
2077 msg = _(b'obsoleted %i changesets\n')
2076 if as_validator:
2078 if as_validator:
2077 msg = _(b'obsoleting %i changesets\n')
2079 msg = _(b'obsoleting %i changesets\n')
2078 repo.ui.status(msg % len(obsoleted))
2080 repo.ui.status(msg % len(obsoleted))
2079
2081
2080 if obsolete.isenabled(
2082 if obsolete.isenabled(
2081 repo, obsolete.createmarkersopt
2083 repo, obsolete.createmarkersopt
2082 ) and repo.ui.configbool(
2084 ) and repo.ui.configbool(
2083 b'experimental', b'evolution.report-instabilities'
2085 b'experimental', b'evolution.report-instabilities'
2084 ):
2086 ):
2085 instabilitytypes = [
2087 instabilitytypes = [
2086 (b'orphan', b'orphan'),
2088 (b'orphan', b'orphan'),
2087 (b'phase-divergent', b'phasedivergent'),
2089 (b'phase-divergent', b'phasedivergent'),
2088 (b'content-divergent', b'contentdivergent'),
2090 (b'content-divergent', b'contentdivergent'),
2089 ]
2091 ]
2090
2092
2091 def getinstabilitycounts(repo):
2093 def getinstabilitycounts(repo):
2092 filtered = repo.changelog.filteredrevs
2094 filtered = repo.changelog.filteredrevs
2093 counts = {}
2095 counts = {}
2094 for instability, revset in instabilitytypes:
2096 for instability, revset in instabilitytypes:
2095 counts[instability] = len(
2097 counts[instability] = len(
2096 set(obsolete.getrevs(repo, revset)) - filtered
2098 set(obsolete.getrevs(repo, revset)) - filtered
2097 )
2099 )
2098 return counts
2100 return counts
2099
2101
2100 oldinstabilitycounts = getinstabilitycounts(repo)
2102 oldinstabilitycounts = getinstabilitycounts(repo)
2101
2103
2102 @reportsummary
2104 @reportsummary
2103 def reportnewinstabilities(repo, tr):
2105 def reportnewinstabilities(repo, tr):
2104 newinstabilitycounts = getinstabilitycounts(repo)
2106 newinstabilitycounts = getinstabilitycounts(repo)
2105 for instability, revset in instabilitytypes:
2107 for instability, revset in instabilitytypes:
2106 delta = (
2108 delta = (
2107 newinstabilitycounts[instability]
2109 newinstabilitycounts[instability]
2108 - oldinstabilitycounts[instability]
2110 - oldinstabilitycounts[instability]
2109 )
2111 )
2110 msg = getinstabilitymessage(delta, instability)
2112 msg = getinstabilitymessage(delta, instability)
2111 if msg:
2113 if msg:
2112 repo.ui.warn(msg)
2114 repo.ui.warn(msg)
2113
2115
2114 if txmatch(_reportnewcssource):
2116 if txmatch(_reportnewcssource):
2115
2117
2116 @reportsummary
2118 @reportsummary
2117 def reportnewcs(repo, tr):
2119 def reportnewcs(repo, tr):
2118 """Report the range of new revisions pulled/unbundled."""
2120 """Report the range of new revisions pulled/unbundled."""
2119 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2121 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2120 unfi = repo.unfiltered()
2122 unfi = repo.unfiltered()
2121 if origrepolen >= len(unfi):
2123 if origrepolen >= len(unfi):
2122 return
2124 return
2123
2125
2124 # Compute the bounds of new visible revisions' range.
2126 # Compute the bounds of new visible revisions' range.
2125 revs = smartset.spanset(repo, start=origrepolen)
2127 revs = smartset.spanset(repo, start=origrepolen)
2126 if revs:
2128 if revs:
2127 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2129 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2128
2130
2129 if minrev == maxrev:
2131 if minrev == maxrev:
2130 revrange = minrev
2132 revrange = minrev
2131 else:
2133 else:
2132 revrange = b'%s:%s' % (minrev, maxrev)
2134 revrange = b'%s:%s' % (minrev, maxrev)
2133 draft = len(repo.revs(b'%ld and draft()', revs))
2135 draft = len(repo.revs(b'%ld and draft()', revs))
2134 secret = len(repo.revs(b'%ld and secret()', revs))
2136 secret = len(repo.revs(b'%ld and secret()', revs))
2135 if not (draft or secret):
2137 if not (draft or secret):
2136 msg = _(b'new changesets %s\n') % revrange
2138 msg = _(b'new changesets %s\n') % revrange
2137 elif draft and secret:
2139 elif draft and secret:
2138 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2140 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2139 msg %= (revrange, draft, secret)
2141 msg %= (revrange, draft, secret)
2140 elif draft:
2142 elif draft:
2141 msg = _(b'new changesets %s (%d drafts)\n')
2143 msg = _(b'new changesets %s (%d drafts)\n')
2142 msg %= (revrange, draft)
2144 msg %= (revrange, draft)
2143 elif secret:
2145 elif secret:
2144 msg = _(b'new changesets %s (%d secrets)\n')
2146 msg = _(b'new changesets %s (%d secrets)\n')
2145 msg %= (revrange, secret)
2147 msg %= (revrange, secret)
2146 else:
2148 else:
2147 errormsg = b'entered unreachable condition'
2149 errormsg = b'entered unreachable condition'
2148 raise error.ProgrammingError(errormsg)
2150 raise error.ProgrammingError(errormsg)
2149 repo.ui.status(msg)
2151 repo.ui.status(msg)
2150
2152
2151 # search new changesets directly pulled as obsolete
2153 # search new changesets directly pulled as obsolete
2152 duplicates = tr.changes.get(b'revduplicates', ())
2154 duplicates = tr.changes.get(b'revduplicates', ())
2153 obsadded = unfi.revs(
2155 obsadded = unfi.revs(
2154 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2156 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2155 )
2157 )
2156 cl = repo.changelog
2158 cl = repo.changelog
2157 extinctadded = [r for r in obsadded if r not in cl]
2159 extinctadded = [r for r in obsadded if r not in cl]
2158 if extinctadded:
2160 if extinctadded:
2159 # They are not just obsolete, but obsolete and invisible;
2161 # They are not just obsolete, but obsolete and invisible;
2160 # we call them "extinct" internally but the term has not been
2162 # we call them "extinct" internally but the term has not been
2161 # exposed to users.
2163 # exposed to users.
2162 msg = b'(%d other changesets obsolete on arrival)\n'
2164 msg = b'(%d other changesets obsolete on arrival)\n'
2163 repo.ui.status(msg % len(extinctadded))
2165 repo.ui.status(msg % len(extinctadded))
2164
2166
2165 @reportsummary
2167 @reportsummary
2166 def reportphasechanges(repo, tr):
2168 def reportphasechanges(repo, tr):
2167 """Report statistics of phase changes for changesets pre-existing
2169 """Report statistics of phase changes for changesets pre-existing
2168 pull/unbundle.
2170 pull/unbundle.
2169 """
2171 """
2170 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2172 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2171 published = []
2173 published = []
2172 for revs, (old, new) in tr.changes.get(b'phases', []):
2174 for revs, (old, new) in tr.changes.get(b'phases', []):
2173 if new != phases.public:
2175 if new != phases.public:
2174 continue
2176 continue
2175 published.extend(rev for rev in revs if rev < origrepolen)
2177 published.extend(rev for rev in revs if rev < origrepolen)
2176 if not published:
2178 if not published:
2177 return
2179 return
2178 msg = _(b'%d local changesets published\n')
2180 msg = _(b'%d local changesets published\n')
2179 if as_validator:
2181 if as_validator:
2180 msg = _(b'%d local changesets will be published\n')
2182 msg = _(b'%d local changesets will be published\n')
2181 repo.ui.status(msg % len(published))
2183 repo.ui.status(msg % len(published))
2182
2184
2183
2185
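# Typical wiring sketch (illustrative; 'repo' is an open repository and the
# transaction name must match one of the sources checked by txmatch()):
#
#   tr = repo.transaction(b'unbundle')
#   registersummarycallback(repo, tr, txnname=b'unbundle')
#   # messages such as 'added N changesets ...' are issued when tr closes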
2184 def getinstabilitymessage(delta, instability):
2186 def getinstabilitymessage(delta, instability):
2185 """function to return the message to show warning about new instabilities
2187 """function to return the message to show warning about new instabilities
2186
2188
2187 exists as a separate function so that extensions can wrap it to show more
2189 exists as a separate function so that extensions can wrap it to show more
2188 information like how to fix instabilities"""
2190 information like how to fix instabilities"""
2189 if delta > 0:
2191 if delta > 0:
2190 return _(b'%i new %s changesets\n') % (delta, instability)
2192 return _(b'%i new %s changesets\n') % (delta, instability)
2191
2193
2192
2194
2193 def nodesummaries(repo, nodes, maxnumnodes=4):
2195 def nodesummaries(repo, nodes, maxnumnodes=4):
2194 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2196 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2195 return b' '.join(short(h) for h in nodes)
2197 return b' '.join(short(h) for h in nodes)
2196 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2198 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2197 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2199 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2198
2200
2199
2201
2200 def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
2202 def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
2201 """check that no named branch has multiple heads"""
2203 """check that no named branch has multiple heads"""
2202 if desc in (b'strip', b'repair'):
2204 if desc in (b'strip', b'repair'):
2203 # skip the logic during strip
2205 # skip the logic during strip
2204 return
2206 return
2205 visible = repo.filtered(filtername)
2207 visible = repo.filtered(filtername)
2206 # possible improvement: we could restrict the check to the affected branches
2208 # possible improvement: we could restrict the check to the affected branches
2207 bm = visible.branchmap()
2209 bm = visible.branchmap()
2208 for name in bm:
2210 for name in bm:
2209 heads = bm.branchheads(name, closed=accountclosed)
2211 heads = bm.branchheads(name, closed=accountclosed)
2210 if len(heads) > 1:
2212 if len(heads) > 1:
2211 msg = _(b'rejecting multiple heads on branch "%s"')
2213 msg = _(b'rejecting multiple heads on branch "%s"')
2212 msg %= name
2214 msg %= name
2213 hint = _(b'%d heads: %s')
2215 hint = _(b'%d heads: %s')
2214 hint %= (len(heads), nodesummaries(repo, heads))
2216 hint %= (len(heads), nodesummaries(repo, heads))
2215 raise error.Abort(msg, hint=hint)
2217 raise error.Abort(msg, hint=hint)
2216
2218
2217
2219
2218 def wrapconvertsink(sink):
2220 def wrapconvertsink(sink):
2219 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2221 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2220 before it is used, whether or not the convert extension was formally loaded.
2222 before it is used, whether or not the convert extension was formally loaded.
2221 """
2223 """
2222 return sink
2224 return sink
2223
2225
2224
2226
2225 def unhidehashlikerevs(repo, specs, hiddentype):
2227 def unhidehashlikerevs(repo, specs, hiddentype):
2226 """parse the user specs and unhide changesets whose hash or revision number
2228 """parse the user specs and unhide changesets whose hash or revision number
2227 is passed.
2229 is passed.
2228
2230
2229 hiddentype can be: 1) 'warn': warn while unhiding changesets
2231 hiddentype can be: 1) 'warn': warn while unhiding changesets
2230 2) 'nowarn': don't warn while unhiding changesets
2232 2) 'nowarn': don't warn while unhiding changesets
2231
2233
2232 returns a repo object with the required changesets unhidden
2234 returns a repo object with the required changesets unhidden
2233 """
2235 """
2234 if not repo.filtername or not repo.ui.configbool(
2236 if not repo.filtername or not repo.ui.configbool(
2235 b'experimental', b'directaccess'
2237 b'experimental', b'directaccess'
2236 ):
2238 ):
2237 return repo
2239 return repo
2238
2240
2239 if repo.filtername not in (b'visible', b'visible-hidden'):
2241 if repo.filtername not in (b'visible', b'visible-hidden'):
2240 return repo
2242 return repo
2241
2243
2242 symbols = set()
2244 symbols = set()
2243 for spec in specs:
2245 for spec in specs:
2244 try:
2246 try:
2245 tree = revsetlang.parse(spec)
2247 tree = revsetlang.parse(spec)
2246 except error.ParseError: # will be reported by scmutil.revrange()
2248 except error.ParseError: # will be reported by scmutil.revrange()
2247 continue
2249 continue
2248
2250
2249 symbols.update(revsetlang.gethashlikesymbols(tree))
2251 symbols.update(revsetlang.gethashlikesymbols(tree))
2250
2252
2251 if not symbols:
2253 if not symbols:
2252 return repo
2254 return repo
2253
2255
2254 revs = _getrevsfromsymbols(repo, symbols)
2256 revs = _getrevsfromsymbols(repo, symbols)
2255
2257
2256 if not revs:
2258 if not revs:
2257 return repo
2259 return repo
2258
2260
2259 if hiddentype == b'warn':
2261 if hiddentype == b'warn':
2260 unfi = repo.unfiltered()
2262 unfi = repo.unfiltered()
2261 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2263 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2262 repo.ui.warn(
2264 repo.ui.warn(
2263 _(
2265 _(
2264 b"warning: accessing hidden changesets for write "
2266 b"warning: accessing hidden changesets for write "
2265 b"operation: %s\n"
2267 b"operation: %s\n"
2266 )
2268 )
2267 % revstr
2269 % revstr
2268 )
2270 )
2269
2271
2270 # we have to use a new filtername to separate the branch/tags caches until
2272 # we have to use a new filtername to separate the branch/tags caches until
2271 # we can disable these caches when revisions are dynamically pinned.
2273 # we can disable these caches when revisions are dynamically pinned.
2272 return repo.filtered(b'visible-hidden', revs)
2274 return repo.filtered(b'visible-hidden', revs)
2273
2275
2274
2276
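# Sketch: with experimental.directaccess enabled, a caller holding the
# user-supplied specs (the hash below is made up) can do:
#
#   repo = unhidehashlikerevs(repo, [b'1ab2c3d4e5f6'], b'warn')
#   # hidden changesets named by hash in the specs are now visible in repo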
2275 def _getrevsfromsymbols(repo, symbols):
2277 def _getrevsfromsymbols(repo, symbols):
2276 """parse the list of symbols and returns a set of revision numbers of hidden
2278 """parse the list of symbols and returns a set of revision numbers of hidden
2277 changesets present in symbols"""
2279 changesets present in symbols"""
2278 revs = set()
2280 revs = set()
2279 unfi = repo.unfiltered()
2281 unfi = repo.unfiltered()
2280 unficl = unfi.changelog
2282 unficl = unfi.changelog
2281 cl = repo.changelog
2283 cl = repo.changelog
2282 tiprev = len(unficl)
2284 tiprev = len(unficl)
2283 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2285 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2284 for s in symbols:
2286 for s in symbols:
2285 try:
2287 try:
2286 n = int(s)
2288 n = int(s)
2287 if n <= tiprev:
2289 if n <= tiprev:
2288 if not allowrevnums:
2290 if not allowrevnums:
2289 continue
2291 continue
2290 else:
2292 else:
2291 if n not in cl:
2293 if n not in cl:
2292 revs.add(n)
2294 revs.add(n)
2293 continue
2295 continue
2294 except ValueError:
2296 except ValueError:
2295 pass
2297 pass
2296
2298
2297 try:
2299 try:
2298 s = resolvehexnodeidprefix(unfi, s)
2300 s = resolvehexnodeidprefix(unfi, s)
2299 except (error.LookupError, error.WdirUnsupported):
2301 except (error.LookupError, error.WdirUnsupported):
2300 s = None
2302 s = None
2301
2303
2302 if s is not None:
2304 if s is not None:
2303 rev = unficl.rev(s)
2305 rev = unficl.rev(s)
2304 if rev not in cl:
2306 if rev not in cl:
2305 revs.add(rev)
2307 revs.add(rev)
2306
2308
2307 return revs
2309 return revs
2308
2310
2309
2311
2310 def bookmarkrevs(repo, mark):
2312 def bookmarkrevs(repo, mark):
2311 """Select revisions reachable by a given bookmark
2313 """Select revisions reachable by a given bookmark
2312
2314
2313 If the bookmarked revision isn't a head, an empty set will be returned.
2315 If the bookmarked revision isn't a head, an empty set will be returned.
2314 """
2316 """
2315 return repo.revs(format_bookmark_revspec(mark))
2317 return repo.revs(format_bookmark_revspec(mark))
2316
2318
2317
2319
2318 def format_bookmark_revspec(mark):
2320 def format_bookmark_revspec(mark):
2319 """Build a revset expression to select revisions reachable by a given
2321 """Build a revset expression to select revisions reachable by a given
2320 bookmark"""
2322 bookmark"""
2321 mark = b'literal:' + mark
2323 mark = b'literal:' + mark
2322 return revsetlang.formatspec(
2324 return revsetlang.formatspec(
2323 b"ancestors(bookmark(%s)) - "
2325 b"ancestors(bookmark(%s)) - "
2324 b"ancestors(head() and not bookmark(%s)) - "
2326 b"ancestors(head() and not bookmark(%s)) - "
2325 b"ancestors(bookmark() and not bookmark(%s))",
2327 b"ancestors(bookmark() and not bookmark(%s))",
2326 mark,
2328 mark,
2327 mark,
2329 mark,
2328 mark,
2330 mark,
2329 )
2331 )
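# For example, format_bookmark_revspec(b'feature') builds a revset roughly
# equivalent to the following (the 'literal:' prefix forces the bookmark
# name to be matched literally rather than as a pattern):
#
#   ancestors(bookmark("literal:feature"))
#     - ancestors(head() and not bookmark("literal:feature"))
#     - ancestors(bookmark() and not bookmark("literal:feature"))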
@@ -1,659 +1,670
1 # wireprotov1peer.py - Client-side functionality for wire protocol version 1.
1 # wireprotov1peer.py - Client-side functionality for wire protocol version 1.
2 #
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import sys
10 import sys
11 import weakref
11 import weakref
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import bin
14 from .node import bin
15 from .pycompat import (
15 from .pycompat import (
16 getattr,
16 getattr,
17 setattr,
17 setattr,
18 )
18 )
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 changegroup as changegroupmod,
21 changegroup as changegroupmod,
22 encoding,
22 encoding,
23 error,
23 error,
24 pushkey as pushkeymod,
24 pushkey as pushkeymod,
25 pycompat,
25 pycompat,
26 util,
26 util,
27 wireprototypes,
27 wireprototypes,
28 )
28 )
29 from .interfaces import (
29 from .interfaces import (
30 repository,
30 repository,
31 util as interfaceutil,
31 util as interfaceutil,
32 )
32 )
33 from .utils import hashutil
33 from .utils import hashutil
34
34
35 urlreq = util.urlreq
35 urlreq = util.urlreq
36
36
37
37
38 def batchable(f):
38 def batchable(f):
39 """annotation for batchable methods
39 """annotation for batchable methods
40
40
41 Such methods must implement a coroutine as follows:
41 Such methods must implement a coroutine as follows:
42
42
43 @batchable
43 @batchable
44 def sample(self, one, two=None):
44 def sample(self, one, two=None):
45 # Build list of encoded arguments suitable for your wire protocol:
45 # Build list of encoded arguments suitable for your wire protocol:
46 encoded_args = [('one', encode(one),), ('two', encode(two),)]
46 encoded_args = [('one', encode(one),), ('two', encode(two),)]
47 # Create future for injection of encoded result:
47 # Create future for injection of encoded result:
48 encoded_res_future = future()
48 encoded_res_future = future()
49 # Return encoded arguments and future:
49 # Return encoded arguments and future:
50 yield encoded_args, encoded_res_future
50 yield encoded_args, encoded_res_future
51 # Assuming the future to be filled with the result from the batched
51 # Assuming the future to be filled with the result from the batched
52 # request now. Decode it:
52 # request now. Decode it:
53 yield decode(encoded_res_future.value)
53 yield decode(encoded_res_future.value)
54
54
55 The decorator returns a function which wraps this coroutine as a plain
55 The decorator returns a function which wraps this coroutine as a plain
56 method, but adds the original method as an attribute called "batchable",
56 method, but adds the original method as an attribute called "batchable",
57 which is used by remotebatch to split the call into separate encoding and
57 which is used by remotebatch to split the call into separate encoding and
58 decoding phases.
58 decoding phases.
59 """
59 """
60
60
61 def plain(*args, **opts):
61 def plain(*args, **opts):
62 batchable = f(*args, **opts)
62 batchable = f(*args, **opts)
63 encoded_args_or_res, encoded_res_future = next(batchable)
63 encoded_args_or_res, encoded_res_future = next(batchable)
64 if not encoded_res_future:
64 if not encoded_res_future:
65 return encoded_args_or_res # a local result in this case
65 return encoded_args_or_res # a local result in this case
66 self = args[0]
66 self = args[0]
67 cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
67 cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
68 encoded_res_future.set(self._submitone(cmd, encoded_args_or_res))
68 encoded_res_future.set(self._submitone(cmd, encoded_args_or_res))
69 return next(batchable)
69 return next(batchable)
70
70
71 setattr(plain, 'batchable', f)
71 setattr(plain, 'batchable', f)
72 setattr(plain, '__name__', f.__name__)
72 setattr(plain, '__name__', f.__name__)
73 return plain
73 return plain
74
74
75
75
76 class future(object):
76 class future(object):
77 '''placeholder for a value to be set later'''
77 '''placeholder for a value to be set later'''
78
78
79 def set(self, value):
79 def set(self, value):
80 if util.safehasattr(self, b'value'):
80 if util.safehasattr(self, b'value'):
81 raise error.RepoError(b"future is already set")
81 raise error.RepoError(b"future is already set")
82 self.value = value
82 self.value = value
83
83
84
84
85 def encodebatchcmds(req):
85 def encodebatchcmds(req):
86 """Return a ``cmds`` argument value for the ``batch`` command."""
86 """Return a ``cmds`` argument value for the ``batch`` command."""
87 escapearg = wireprototypes.escapebatcharg
87 escapearg = wireprototypes.escapebatcharg
88
88
89 cmds = []
89 cmds = []
90 for op, argsdict in req:
90 for op, argsdict in req:
91 # Old servers didn't properly unescape argument names. So prevent
91 # Old servers didn't properly unescape argument names. So prevent
92 # the sending of argument names that may not be decoded properly by
92 # the sending of argument names that may not be decoded properly by
93 # servers.
93 # servers.
94 assert all(escapearg(k) == k for k in argsdict)
94 assert all(escapearg(k) == k for k in argsdict)
95
95
96 args = b','.join(
96 args = b','.join(
97 b'%s=%s' % (escapearg(k), escapearg(v))
97 b'%s=%s' % (escapearg(k), escapearg(v))
98 for k, v in pycompat.iteritems(argsdict)
98 for k, v in pycompat.iteritems(argsdict)
99 )
99 )
100 cmds.append(b'%s %s' % (op, args))
100 cmds.append(b'%s %s' % (op, args))
101
101
102 return b';'.join(cmds)
102 return b';'.join(cmds)
103
103
104
104
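# For instance, a request list such as
#
#   [(b'heads', {}), (b'lookup', {b'key': b'tip'})]
#
# encodes to b'heads ;lookup key=tip' (the space after 'heads' remains
# because its argument list is empty).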
105 class unsentfuture(pycompat.futures.Future):
105 class unsentfuture(pycompat.futures.Future):
106 """A Future variation to represent an unsent command.
106 """A Future variation to represent an unsent command.
107
107
108 Because we buffer commands and don't submit them immediately, calling
108 Because we buffer commands and don't submit them immediately, calling
109 ``result()`` on an unsent future could deadlock. Futures for buffered
109 ``result()`` on an unsent future could deadlock. Futures for buffered
110 commands are represented by this type, which wraps ``result()`` to
110 commands are represented by this type, which wraps ``result()`` to
111 call ``sendcommands()``.
111 call ``sendcommands()``.
112 """
112 """
113
113
114 def result(self, timeout=None):
114 def result(self, timeout=None):
115 if self.done():
115 if self.done():
116 return pycompat.futures.Future.result(self, timeout)
116 return pycompat.futures.Future.result(self, timeout)
117
117
118 self._peerexecutor.sendcommands()
118 self._peerexecutor.sendcommands()
119
119
120 # This looks like it will infinitely recurse. However,
120 # This looks like it will infinitely recurse. However,
121 # sendcommands() should modify __class__. This call serves as a check
121 # sendcommands() should modify __class__. This call serves as a check
122 # on that.
122 # on that.
123 return self.result(timeout)
123 return self.result(timeout)
124
124
125
125
126 @interfaceutil.implementer(repository.ipeercommandexecutor)
126 @interfaceutil.implementer(repository.ipeercommandexecutor)
127 class peerexecutor(object):
127 class peerexecutor(object):
128 def __init__(self, peer):
128 def __init__(self, peer):
129 self._peer = peer
129 self._peer = peer
130 self._sent = False
130 self._sent = False
131 self._closed = False
131 self._closed = False
132 self._calls = []
132 self._calls = []
133 self._futures = weakref.WeakSet()
133 self._futures = weakref.WeakSet()
134 self._responseexecutor = None
134 self._responseexecutor = None
135 self._responsef = None
135 self._responsef = None
136
136
137 def __enter__(self):
137 def __enter__(self):
138 return self
138 return self
139
139
140 def __exit__(self, exctype, excvalue, exctb):
140 def __exit__(self, exctype, excvalue, exctb):
141 self.close()
141 self.close()
142
142
143 def callcommand(self, command, args):
143 def callcommand(self, command, args):
144 if self._sent:
144 if self._sent:
145 raise error.ProgrammingError(
145 raise error.ProgrammingError(
146 b'callcommand() cannot be used after commands are sent'
146 b'callcommand() cannot be used after commands are sent'
147 )
147 )
148
148
149 if self._closed:
149 if self._closed:
150 raise error.ProgrammingError(
150 raise error.ProgrammingError(
151 b'callcommand() cannot be used after close()'
151 b'callcommand() cannot be used after close()'
152 )
152 )
153
153
154 # Commands are dispatched through methods on the peer.
154 # Commands are dispatched through methods on the peer.
155 fn = getattr(self._peer, pycompat.sysstr(command), None)
155 fn = getattr(self._peer, pycompat.sysstr(command), None)
156
156
157 if not fn:
157 if not fn:
158 raise error.ProgrammingError(
158 raise error.ProgrammingError(
159 b'cannot call command %s: method of same name not available '
159 b'cannot call command %s: method of same name not available '
160 b'on peer' % command
160 b'on peer' % command
161 )
161 )
162
162
163 # Commands are either batchable or they aren't. If a command
163 # Commands are either batchable or they aren't. If a command
164 # isn't batchable, we send it immediately because the executor
164 # isn't batchable, we send it immediately because the executor
165 # can no longer accept new commands after a non-batchable command.
165 # can no longer accept new commands after a non-batchable command.
166 # If a command is batchable, we queue it for later. But we have
166 # If a command is batchable, we queue it for later. But we have
167 # to account for the case of a non-batchable command arriving after
167 # to account for the case of a non-batchable command arriving after
168 # a batchable one and refuse to service it.
168 # a batchable one and refuse to service it.
169
169
170 def addcall():
170 def addcall():
171 f = pycompat.futures.Future()
171 f = pycompat.futures.Future()
172 self._futures.add(f)
172 self._futures.add(f)
173 self._calls.append((command, args, fn, f))
173 self._calls.append((command, args, fn, f))
174 return f
174 return f
175
175
176 if getattr(fn, 'batchable', False):
176 if getattr(fn, 'batchable', False):
177 f = addcall()
177 f = addcall()
178
178
179 # But since we don't issue it immediately, we wrap its result()
179 # But since we don't issue it immediately, we wrap its result()
180 # to trigger sending so we avoid deadlocks.
180 # to trigger sending so we avoid deadlocks.
181 f.__class__ = unsentfuture
181 f.__class__ = unsentfuture
182 f._peerexecutor = self
182 f._peerexecutor = self
183 else:
183 else:
184 if self._calls:
184 if self._calls:
185 raise error.ProgrammingError(
185 raise error.ProgrammingError(
186 b'%s is not batchable and cannot be called on a command '
186 b'%s is not batchable and cannot be called on a command '
187 b'executor along with other commands' % command
187 b'executor along with other commands' % command
188 )
188 )
189
189
190 f = addcall()
190 f = addcall()
191
191
192 # Non-batchable commands can never coexist with another command
192 # Non-batchable commands can never coexist with another command
193 # in this executor. So send the command immediately.
193 # in this executor. So send the command immediately.
194 self.sendcommands()
194 self.sendcommands()
195
195
196 return f
196 return f
197
197
198 def sendcommands(self):
198 def sendcommands(self):
199 if self._sent:
199 if self._sent:
200 return
200 return
201
201
202 if not self._calls:
202 if not self._calls:
203 return
203 return
204
204
205 self._sent = True
205 self._sent = True
206
206
207 # Unhack any future types so the caller sees a clean type and to break
207 # Unhack any future types so the caller sees a clean type and to break
208 # the cycle between us and the futures.
208 # the cycle between us and the futures.
209 for f in self._futures:
209 for f in self._futures:
210 if isinstance(f, unsentfuture):
210 if isinstance(f, unsentfuture):
211 f.__class__ = pycompat.futures.Future
211 f.__class__ = pycompat.futures.Future
212 f._peerexecutor = None
212 f._peerexecutor = None
213
213
214 calls = self._calls
214 calls = self._calls
215 # Mainly to destroy references to futures.
215 # Mainly to destroy references to futures.
216 self._calls = None
216 self._calls = None
217
217
218 # Simple case of a single command. We call it synchronously.
218 # Simple case of a single command. We call it synchronously.
219 if len(calls) == 1:
219 if len(calls) == 1:
220 command, args, fn, f = calls[0]
220 command, args, fn, f = calls[0]
221
221
222 # Future was cancelled. Ignore it.
222 # Future was cancelled. Ignore it.
223 if not f.set_running_or_notify_cancel():
223 if not f.set_running_or_notify_cancel():
224 return
224 return
225
225
226 try:
226 try:
227 result = fn(**pycompat.strkwargs(args))
227 result = fn(**pycompat.strkwargs(args))
228 except Exception:
228 except Exception:
229 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
229 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
230 else:
230 else:
231 f.set_result(result)
231 f.set_result(result)
232
232
233 return
233 return
234
234
235 # Batch commands are a bit harder. First, we have to deal with the
235 # Batch commands are a bit harder. First, we have to deal with the
236 # @batchable coroutine. That's a bit annoying. Furthermore, we also
236 # @batchable coroutine. That's a bit annoying. Furthermore, we also
237 # need to preserve streaming. i.e. it should be possible for the
237 # need to preserve streaming. i.e. it should be possible for the
238 # futures to resolve as data is coming in off the wire without having
238 # futures to resolve as data is coming in off the wire without having
239 # to wait for the final byte of the final response. We do this by
239 # to wait for the final byte of the final response. We do this by
240 # spinning up a thread to read the responses.
240 # spinning up a thread to read the responses.
241
241
242 requests = []
242 requests = []
243 states = []
243 states = []
244
244
245 for command, args, fn, f in calls:
245 for command, args, fn, f in calls:
246 # Future was cancelled. Ignore it.
246 # Future was cancelled. Ignore it.
247 if not f.set_running_or_notify_cancel():
247 if not f.set_running_or_notify_cancel():
248 continue
248 continue
249
249
250 try:
250 try:
251 batchable = fn.batchable(
251 batchable = fn.batchable(
252 fn.__self__, **pycompat.strkwargs(args)
252 fn.__self__, **pycompat.strkwargs(args)
253 )
253 )
254 except Exception:
254 except Exception:
255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
256 return
256 return
257
257
258 # Encoded arguments and future holding remote result.
258 # Encoded arguments and future holding remote result.
259 try:
259 try:
260 encoded_args_or_res, fremote = next(batchable)
260 encoded_args_or_res, fremote = next(batchable)
261 except Exception:
261 except Exception:
262 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
262 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
263 return
263 return
264
264
265 if not fremote:
265 if not fremote:
266 f.set_result(encoded_args_or_res)
266 f.set_result(encoded_args_or_res)
267 else:
267 else:
268 requests.append((command, encoded_args_or_res))
268 requests.append((command, encoded_args_or_res))
269 states.append((command, f, batchable, fremote))
269 states.append((command, f, batchable, fremote))
270
270
271 if not requests:
271 if not requests:
272 return
272 return
273
273
274 # This will emit responses in the order they were executed.
274 # This will emit responses in the order they were executed.
275 wireresults = self._peer._submitbatch(requests)
275 wireresults = self._peer._submitbatch(requests)
276
276
277 # The use of a thread pool executor here is a bit weird for something
277 # The use of a thread pool executor here is a bit weird for something
278 # that only spins up a single thread. However, thread management is
278 # that only spins up a single thread. However, thread management is
279 # hard and it is easy to encounter race conditions, deadlocks, etc.
279 # hard and it is easy to encounter race conditions, deadlocks, etc.
280 # concurrent.futures already solves these problems and its thread pool
280 # concurrent.futures already solves these problems and its thread pool
281 # executor has minimal overhead. So we use it.
281 # executor has minimal overhead. So we use it.
282 self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
282 self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
283 self._responsef = self._responseexecutor.submit(
283 self._responsef = self._responseexecutor.submit(
284 self._readbatchresponse, states, wireresults
284 self._readbatchresponse, states, wireresults
285 )
285 )
286
286
287 def close(self):
287 def close(self):
288 self.sendcommands()
288 self.sendcommands()
289
289
290 if self._closed:
290 if self._closed:
291 return
291 return
292
292
293 self._closed = True
293 self._closed = True
294
294
295 if not self._responsef:
295 if not self._responsef:
296 return
296 return
297
297
298 # We need to wait on our in-flight response and then shut down the
298 # We need to wait on our in-flight response and then shut down the
299 # executor once we have a result.
299 # executor once we have a result.
300 try:
300 try:
301 self._responsef.result()
301 self._responsef.result()
302 finally:
302 finally:
303 self._responseexecutor.shutdown(wait=True)
303 self._responseexecutor.shutdown(wait=True)
304 self._responsef = None
304 self._responsef = None
305 self._responseexecutor = None
305 self._responseexecutor = None
306
306
307 # If any of our futures are still in progress, mark them as
307 # If any of our futures are still in progress, mark them as
308 # errored. Otherwise a result() could wait indefinitely.
308 # errored. Otherwise a result() could wait indefinitely.
309 for f in self._futures:
309 for f in self._futures:
310 if not f.done():
310 if not f.done():
311 f.set_exception(
311 f.set_exception(
312 error.ResponseError(
312 error.ResponseError(
313 _(b'unfulfilled batch command response')
313 _(b'unfulfilled batch command response'), None
314 )
314 )
315 )
315 )
316
316
317 self._futures = None
317 self._futures = None
318
318
319 def _readbatchresponse(self, states, wireresults):
319 def _readbatchresponse(self, states, wireresults):
320 # Executes in a thread to read data off the wire.
320 # Executes in a thread to read data off the wire.
321
321
322 for command, f, batchable, fremote in states:
322 for command, f, batchable, fremote in states:
323 # Grab raw result off the wire and teach the internal future
323 # Grab raw result off the wire and teach the internal future
324 # about it.
324 # about it.
325 remoteresult = next(wireresults)
325 try:
326 fremote.set(remoteresult)
326 remoteresult = next(wireresults)
327 except StopIteration:
328 # This can happen in particular because next(batchable)
329 # in the previous iteration can call peer._abort, which
330 # may close the peer.
331 f.set_exception(
332 error.ResponseError(
333 _(b'unfulfilled batch command response'), None
334 )
335 )
336 else:
337 fremote.set(remoteresult)
327
338
328 # And ask the coroutine to decode that value.
339 # And ask the coroutine to decode that value.
329 try:
340 try:
330 result = next(batchable)
341 result = next(batchable)
331 except Exception:
342 except Exception:
332 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
343 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
333 else:
344 else:
334 f.set_result(result)
345 f.set_result(result)
335
346
336
347
337 @interfaceutil.implementer(
348 @interfaceutil.implementer(
338 repository.ipeercommands, repository.ipeerlegacycommands
349 repository.ipeercommands, repository.ipeerlegacycommands
339 )
350 )
340 class wirepeer(repository.peer):
351 class wirepeer(repository.peer):
341 """Client-side interface for communicating with a peer repository.
352 """Client-side interface for communicating with a peer repository.
342
353
343 Methods commonly call wire protocol commands of the same name.
354 Methods commonly call wire protocol commands of the same name.
344
355
345 See also httppeer.py and sshpeer.py for protocol-specific
356 See also httppeer.py and sshpeer.py for protocol-specific
346 implementations of this interface.
357 implementations of this interface.
347 """
358 """
348
359
349 def commandexecutor(self):
360 def commandexecutor(self):
350 return peerexecutor(self)
361 return peerexecutor(self)
351
362
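# Typical caller-side sketch ('peer' is any wirepeer; the futures resolve
# once the batched request has been sent and its response read):
#
#   with peer.commandexecutor() as e:
#       fheads = e.callcommand(b'heads', {})
#       fknown = e.callcommand(b'known', {b'nodes': nodes})
#   heads = fheads.result()
#   known = fknown.result()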
352 # Begin of ipeercommands interface.
363 # Begin of ipeercommands interface.
353
364
354 def clonebundles(self):
365 def clonebundles(self):
355 self.requirecap(b'clonebundles', _(b'clone bundles'))
366 self.requirecap(b'clonebundles', _(b'clone bundles'))
356 return self._call(b'clonebundles')
367 return self._call(b'clonebundles')
357
368
358 @batchable
369 @batchable
359 def lookup(self, key):
370 def lookup(self, key):
360 self.requirecap(b'lookup', _(b'look up remote revision'))
371 self.requirecap(b'lookup', _(b'look up remote revision'))
361 f = future()
372 f = future()
362 yield {b'key': encoding.fromlocal(key)}, f
373 yield {b'key': encoding.fromlocal(key)}, f
363 d = f.value
374 d = f.value
364 success, data = d[:-1].split(b" ", 1)
375 success, data = d[:-1].split(b" ", 1)
365 if int(success):
376 if int(success):
366 yield bin(data)
377 yield bin(data)
367 else:
378 else:
368 self._abort(error.RepoError(data))
379 self._abort(error.RepoError(data))
369
380
370 @batchable
381 @batchable
371 def heads(self):
382 def heads(self):
372 f = future()
383 f = future()
373 yield {}, f
384 yield {}, f
374 d = f.value
385 d = f.value
375 try:
386 try:
376 yield wireprototypes.decodelist(d[:-1])
387 yield wireprototypes.decodelist(d[:-1])
377 except ValueError:
388 except ValueError:
378 self._abort(error.ResponseError(_(b"unexpected response:"), d))
389 self._abort(error.ResponseError(_(b"unexpected response:"), d))
379
390
380 @batchable
391 @batchable
381 def known(self, nodes):
392 def known(self, nodes):
382 f = future()
393 f = future()
383 yield {b'nodes': wireprototypes.encodelist(nodes)}, f
394 yield {b'nodes': wireprototypes.encodelist(nodes)}, f
384 d = f.value
395 d = f.value
385 try:
396 try:
386 yield [bool(int(b)) for b in pycompat.iterbytestr(d)]
397 yield [bool(int(b)) for b in pycompat.iterbytestr(d)]
387 except ValueError:
398 except ValueError:
388 self._abort(error.ResponseError(_(b"unexpected response:"), d))
399 self._abort(error.ResponseError(_(b"unexpected response:"), d))
389
400
390 @batchable
401 @batchable
391 def branchmap(self):
402 def branchmap(self):
392 f = future()
403 f = future()
393 yield {}, f
404 yield {}, f
394 d = f.value
405 d = f.value
395 try:
406 try:
396 branchmap = {}
407 branchmap = {}
397 for branchpart in d.splitlines():
408 for branchpart in d.splitlines():
398 branchname, branchheads = branchpart.split(b' ', 1)
409 branchname, branchheads = branchpart.split(b' ', 1)
399 branchname = encoding.tolocal(urlreq.unquote(branchname))
410 branchname = encoding.tolocal(urlreq.unquote(branchname))
400 branchheads = wireprototypes.decodelist(branchheads)
411 branchheads = wireprototypes.decodelist(branchheads)
401 branchmap[branchname] = branchheads
412 branchmap[branchname] = branchheads
402 yield branchmap
413 yield branchmap
403 except TypeError:
414 except TypeError:
404 self._abort(error.ResponseError(_(b"unexpected response:"), d))
415 self._abort(error.ResponseError(_(b"unexpected response:"), d))
405
416
406 @batchable
417 @batchable
407 def listkeys(self, namespace):
418 def listkeys(self, namespace):
408 if not self.capable(b'pushkey'):
419 if not self.capable(b'pushkey'):
409 yield {}, None
420 yield {}, None
410 f = future()
421 f = future()
411 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
422 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
412 yield {b'namespace': encoding.fromlocal(namespace)}, f
423 yield {b'namespace': encoding.fromlocal(namespace)}, f
413 d = f.value
424 d = f.value
414 self.ui.debug(
425 self.ui.debug(
415 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
426 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
416 )
427 )
417 yield pushkeymod.decodekeys(d)
428 yield pushkeymod.decodekeys(d)
418
429
419 @batchable
430 @batchable
420 def pushkey(self, namespace, key, old, new):
431 def pushkey(self, namespace, key, old, new):
421 if not self.capable(b'pushkey'):
432 if not self.capable(b'pushkey'):
422 yield False, None
433 yield False, None
423 f = future()
434 f = future()
424 self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))
435 self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))
425 yield {
436 yield {
426 b'namespace': encoding.fromlocal(namespace),
437 b'namespace': encoding.fromlocal(namespace),
427 b'key': encoding.fromlocal(key),
438 b'key': encoding.fromlocal(key),
428 b'old': encoding.fromlocal(old),
439 b'old': encoding.fromlocal(old),
429 b'new': encoding.fromlocal(new),
440 b'new': encoding.fromlocal(new),
430 }, f
441 }, f
431 d = f.value
442 d = f.value
432 d, output = d.split(b'\n', 1)
443 d, output = d.split(b'\n', 1)
433 try:
444 try:
434 d = bool(int(d))
445 d = bool(int(d))
435 except ValueError:
446 except ValueError:
436 raise error.ResponseError(
447 raise error.ResponseError(
437 _(b'push failed (unexpected response):'), d
448 _(b'push failed (unexpected response):'), d
438 )
449 )
439 for l in output.splitlines(True):
450 for l in output.splitlines(True):
440 self.ui.status(_(b'remote: '), l)
451 self.ui.status(_(b'remote: '), l)
441 yield d
452 yield d
442
453
443 def stream_out(self):
454 def stream_out(self):
444 return self._callstream(b'stream_out')
455 return self._callstream(b'stream_out')
445
456
457     def getbundle(self, source, **kwargs):
458         kwargs = pycompat.byteskwargs(kwargs)
459         self.requirecap(b'getbundle', _(b'look up remote changes'))
460         opts = {}
461         bundlecaps = kwargs.get(b'bundlecaps') or set()
462         for key, value in pycompat.iteritems(kwargs):
463             if value is None:
464                 continue
465             keytype = wireprototypes.GETBUNDLE_ARGUMENTS.get(key)
466             if keytype is None:
467                 raise error.ProgrammingError(
468                     b'Unexpectedly None keytype for key %s' % key
469                 )
470             elif keytype == b'nodes':
471                 value = wireprototypes.encodelist(value)
472             elif keytype == b'csv':
473                 value = b','.join(value)
474             elif keytype == b'scsv':
475                 value = b','.join(sorted(value))
476             elif keytype == b'boolean':
477                 value = b'%i' % bool(value)
478             elif keytype != b'plain':
479                 raise KeyError(b'unknown getbundle option type %s' % keytype)
480             opts[key] = value
481         f = self._callcompressable(b"getbundle", **pycompat.strkwargs(opts))
482         if any((cap.startswith(b'HG2') for cap in bundlecaps)):
483             return bundle2.getunbundler(self.ui, f)
484         else:
485             return changegroupmod.cg1unpacker(f, b'UN')
486
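Each getbundle argument is serialized according to its declared type in GETBUNDLE_ARGUMENTS before going over the wire. A standalone sketch of those rules; the encodelist stand-in here is an assumption (hex-encode and join), not the real wireprototypes.encodelist:

import binascii


def encodelist(nodes, sep=b' '):
    # assumed stand-in for wireprototypes.encodelist
    return sep.join(binascii.hexlify(n) for n in nodes)


def encodearg(keytype, value):
    if keytype == b'nodes':
        return encodelist(value)
    elif keytype == b'csv':
        return b','.join(value)
    elif keytype == b'scsv':
        return b','.join(sorted(value))
    elif keytype == b'boolean':
        return b'%i' % bool(value)
    elif keytype == b'plain':
        return value
    raise KeyError(b'unknown getbundle option type %s' % keytype)


print(encodearg(b'scsv', {b'bundle2', b'HG20'}))  # b'HG20,bundle2'
print(encodearg(b'boolean', True))  # b'1'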
487     def unbundle(self, bundle, heads, url):
488         """Send cg (a readable file-like object representing the
489         changegroup to push, typically a chunkbuffer object) to the
490         remote server as a bundle.
491
492         When pushing a bundle10 stream, return an integer indicating the
493         result of the push (see changegroup.apply()).
494
495         When pushing a bundle20 stream, return a bundle20 stream.
496
497         `url` is the url the client thinks it's pushing to, which is
498         visible to hooks.
499         """
500
501         if heads != [b'force'] and self.capable(b'unbundlehash'):
502             heads = wireprototypes.encodelist(
503                 [b'hashed', hashutil.sha1(b''.join(sorted(heads))).digest()]
504             )
505         else:
506             heads = wireprototypes.encodelist(heads)
507
508         if util.safehasattr(bundle, b'deltaheader'):
509             # this is a bundle10, do the old style call sequence
510             ret, output = self._callpush(b"unbundle", bundle, heads=heads)
511             if ret == b"":
512                 raise error.ResponseError(_(b'push failed:'), output)
513             try:
514                 ret = int(ret)
515             except ValueError:
516                 raise error.ResponseError(
517                     _(b'push failed (unexpected response):'), ret
518                 )
519
520             for l in output.splitlines(True):
521                 self.ui.status(_(b'remote: '), l)
522         else:
523             # bundle2 push. Send a stream, fetch a stream.
524             stream = self._calltwowaystream(b'unbundle', bundle, heads=heads)
525             ret = bundle2.getunbundler(self.ui, stream)
526         return ret
527
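The unbundlehash branch above replaces the full head list with a single SHA-1 over the sorted heads, so the request size stays constant however many heads the client knows about. Roughly, using hashlib directly where the code above goes through hashutil:

import hashlib

heads = [b'\x22' * 20, b'\x11' * 20]  # two fake 20-byte node ids
digest = hashlib.sha1(b''.join(sorted(heads))).digest()
wire_heads = [b'hashed', digest]  # what encodelist then serializes
print(len(digest))  # always 20, independent of len(heads)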
528     # End of ipeercommands interface.
529
530     # Begin of ipeerlegacycommands interface.
531
532     def branches(self, nodes):
533         n = wireprototypes.encodelist(nodes)
534         d = self._call(b"branches", nodes=n)
535         try:
536             br = [tuple(wireprototypes.decodelist(b)) for b in d.splitlines()]
537             return br
538         except ValueError:
539             self._abort(error.ResponseError(_(b"unexpected response:"), d))
540
541     def between(self, pairs):
542         batch = 8  # avoid giant requests
543         r = []
544         for i in pycompat.xrange(0, len(pairs), batch):
545             n = b" ".join(
546                 [
547                     wireprototypes.encodelist(p, b'-')
548                     for p in pairs[i : i + batch]
549                 ]
550             )
551             d = self._call(b"between", pairs=n)
552             try:
553                 r.extend(
554                     l and wireprototypes.decodelist(l) or []
555                     for l in d.splitlines()
556                 )
557             except ValueError:
558                 self._abort(error.ResponseError(_(b"unexpected response:"), d))
559         return r
560
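between() deliberately caps each wire request at eight pairs; the chunking itself is plain stride slicing, as in this sketch with fake data:

pairs = [(b'n%d' % i, b'm%d' % i) for i in range(20)]  # fake data
batch = 8
for i in range(0, len(pairs), batch):
    chunk = pairs[i : i + batch]
    print(len(chunk))  # prints 8, 8, 4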
561     def changegroup(self, nodes, source):
562         n = wireprototypes.encodelist(nodes)
563         f = self._callcompressable(b"changegroup", roots=n)
564         return changegroupmod.cg1unpacker(f, b'UN')
565
566     def changegroupsubset(self, bases, heads, source):
567         self.requirecap(b'changegroupsubset', _(b'look up remote changes'))
568         bases = wireprototypes.encodelist(bases)
569         heads = wireprototypes.encodelist(heads)
570         f = self._callcompressable(
571             b"changegroupsubset", bases=bases, heads=heads
572         )
573         return changegroupmod.cg1unpacker(f, b'UN')
574
575     # End of ipeerlegacycommands interface.
576
577     def _submitbatch(self, req):
578         """run batch request <req> on the server
579
580         Returns an iterator of the raw responses from the server.
581         """
582         ui = self.ui
583         if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
584             ui.debug(b'devel-peer-request: batched-content\n')
585             for op, args in req:
586                 msg = b'devel-peer-request: - %s (%d arguments)\n'
587                 ui.debug(msg % (op, len(args)))
588
589         unescapearg = wireprototypes.unescapebatcharg
590
591         rsp = self._callstream(b"batch", cmds=encodebatchcmds(req))
592         chunk = rsp.read(1024)
593         work = [chunk]
594         while chunk:
595             while b';' not in chunk and chunk:
596                 chunk = rsp.read(1024)
597                 work.append(chunk)
598             merged = b''.join(work)
599             while b';' in merged:
600                 one, merged = merged.split(b';', 1)
601                 yield unescapearg(one)
602             chunk = rsp.read(1024)
603             work = [merged, chunk]
604         yield unescapearg(b''.join(work))
605
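The loop above splits one b';'-separated response stream incrementally, buffering only the unconsumed tail. The same logic, lifted into a self-contained generator over io.BytesIO, with the unescaping left as an identity stand-in for wireprototypes.unescapebatcharg:

import io


def parse_batch(rsp, unescape=lambda arg: arg):
    # incremental split on b';', mirroring _submitbatch above
    chunk = rsp.read(1024)
    work = [chunk]
    while chunk:
        while b';' not in chunk and chunk:
            chunk = rsp.read(1024)
            work.append(chunk)
        merged = b''.join(work)
        while b';' in merged:
            one, merged = merged.split(b';', 1)
            yield unescape(one)
        chunk = rsp.read(1024)
        work = [merged, chunk]
    yield unescape(b''.join(work))


print(list(parse_batch(io.BytesIO(b'ok;also ok;last'))))
# [b'ok', b'also ok', b'last']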
606     def _submitone(self, op, args):
607         return self._call(op, **pycompat.strkwargs(args))
608
609     def debugwireargs(self, one, two, three=None, four=None, five=None):
610         # don't pass optional arguments left at their default value
611         opts = {}
612         if three is not None:
613             opts['three'] = three
614         if four is not None:
615             opts['four'] = four
616         return self._call(b'debugwireargs', one=one, two=two, **opts)
617
618     def _call(self, cmd, **args):
619         """execute <cmd> on the server
620
621         The command is expected to return a simple string.
622
623         returns the server reply as a string."""
624         raise NotImplementedError()
625
626     def _callstream(self, cmd, **args):
627         """execute <cmd> on the server
628
629         The command is expected to return a stream. Note that if the
630         command doesn't return a stream, _callstream behaves
631         differently for ssh and http peers.
632
633         returns the server reply as a file like object.
634         """
635         raise NotImplementedError()
636
637     def _callcompressable(self, cmd, **args):
638         """execute <cmd> on the server
639
640         The command is expected to return a stream.
641
642         The stream may have been compressed in some implementations. This
643         function takes care of the decompression. This is the only difference
644         from _callstream.
645
646         returns the server reply as a file like object.
647         """
648         raise NotImplementedError()
649
650     def _callpush(self, cmd, fp, **args):
651         """execute <cmd> on the server
652
653         The command is expected to be related to a push. Push has a special
654         return method.
655
656         returns the server reply as a (ret, output) tuple. ret is either
657         empty (error) or a stringified int.
658         """
659         raise NotImplementedError()
660
661     def _calltwowaystream(self, cmd, fp, **args):
662         """execute <cmd> on the server
663
664         The command will send a stream to the server and get a stream in reply.
665         """
666         raise NotImplementedError()
667
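The _call* methods are transport hooks that concrete peers (sshpeer, httppeer) implement. A toy, purely hypothetical implementation that answers from a canned table instead of a socket, using the _abort hook defined just below:

class dummypeer(object):
    """Hypothetical peer: answers from a canned table instead of a socket."""

    responses = {b'heads': b'deadbeef' * 5 + b'\n'}

    def _call(self, cmd, **args):
        try:
            return self.responses[cmd]
        except KeyError:
            self._abort(RuntimeError('unknown command %r' % cmd))

    def _abort(self, exception):
        # a real peer would also tear down the connection here
        raise exception


print(dummypeer()._call(b'heads'))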
668     def _abort(self, exception):
669         """clearly abort the wire protocol connection and raise the exception"""
670         raise NotImplementedError()
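The changeset's stated goal (per the commit message) is to keep internal exceptions such as StopIteration from reaching the user when a peer is stopped mid-batch. A toy reproduction of the underlying failure mode, resuming a closed two-step generator like the @batchable ones above:

def batchable_like():
    yield b'encoded-args'  # step 1: what would go over the wire
    yield b'decoded-result'  # step 2: never reached if the peer died


gen = batchable_like()
next(gen)  # arguments submitted
gen.close()  # connection torn down early, e.g. the sshpeer was stopped
try:
    next(gen)  # resuming a closed generator raises StopIteration
except StopIteration:
    print('surfaces as an internal error unless converted to a clean abort')

The test below exercises exactly this: before the fix the pull ended in a bare StopIteration, afterwards it aborts with a proper message.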
@@ -1,13 +1,15
1 $ hg init a
2 $ cd a
3 $ touch a; hg commit -qAm_
4 $ hg bookmark $(for i in $($TESTDIR/seq.py 0 20); do echo b$i; done)
5 $ hg clone . ../b -q
6 $ cd ../b
7
8 Checking that when looking up multiple bookmarks in one go, if one of them
9 fails (thus causing the sshpeer to be stopped), the errors from the
10 further lookups don't result in tracebacks.
11
- 12 $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/$(pwd)/../a |& tail -n 1
- 13 StopIteration
+ 12 $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/$(pwd)/../a
+ 13 pulling from ssh://user@dummy/$TESTTMP/b/../a
+ 14 abort: unknown revision 'nosuchbookmark'
+ 15 [255]