##// END OF EJS Templates
py3: fix exception message encoding in scmutil.py's simplekeyvaluefile.read...
Emmanuel Leblond -
r43682:899e55e2 stable
parent child Browse files
Show More
@@ -1,2221 +1,2221
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import posixpath
14 import posixpath
15 import re
15 import re
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 nullid,
23 nullid,
24 nullrev,
24 nullrev,
25 short,
25 short,
26 wdirid,
26 wdirid,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from .pycompat import getattr
29 from .pycompat import getattr
30
30
31 from . import (
31 from . import (
32 copies as copiesmod,
32 copies as copiesmod,
33 encoding,
33 encoding,
34 error,
34 error,
35 match as matchmod,
35 match as matchmod,
36 obsolete,
36 obsolete,
37 obsutil,
37 obsutil,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 policy,
40 policy,
41 pycompat,
41 pycompat,
42 revsetlang,
42 revsetlang,
43 similar,
43 similar,
44 smartset,
44 smartset,
45 url,
45 url,
46 util,
46 util,
47 vfs,
47 vfs,
48 )
48 )
49
49
50 from .utils import (
50 from .utils import (
51 procutil,
51 procutil,
52 stringutil,
52 stringutil,
53 )
53 )
54
54
55 if pycompat.iswindows:
55 if pycompat.iswindows:
56 from . import scmwindows as scmplatform
56 from . import scmwindows as scmplatform
57 else:
57 else:
58 from . import scmposix as scmplatform
58 from . import scmposix as scmplatform
59
59
60 parsers = policy.importmod(r'parsers')
60 parsers = policy.importmod(r'parsers')
61
61
62 termsize = scmplatform.termsize
62 termsize = scmplatform.termsize
63
63
64
64
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(
        cls, modified, added, removed, deleted, unknown, ignored, clean
    ):
        # Store the seven lists positionally; the properties below give
        # them readable names.
        values = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, values)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        # Render each list through pprint so the repr is stable and
        # readable on both py2 and py3.
        rendered = tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
        template = (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        )
        return template % rendered
121
121
122
122
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context it should be read from, preferring
    # ctx1.  The subpaths from ctx2 matter when the .hgsub file has been
    # modified (in ctx2) but not yet committed (in ctx1).
    owner = dict.fromkeys(ctx2.substate, ctx2)
    owner.update(dict.fromkeys(ctx1.substate, ctx1))

    onlyin2 = set()

    for path in ctx2.substate:
        if path not in ctx1.substate:
            del owner[path]
            onlyin2.add(path)

    for path, ctx in sorted(pycompat.iteritems(owner)):
        yield path, ctx.sub(path)

    # For anything only in ctx2, yield an empty subrepo based on ctx1 so
    # that status and diff give accurate results for
    # 'sub.{status|diff}(rev2)'.  Otherwise the ctx2 subrepo would be
    # compared against itself.
    for path in onlyin2:
        yield path, ctx2.nullsub(path, ctx1)
147
147
148
148
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count excluded nodes that are secret and still alive; they explain
    # why nothing was exchanged.
    secretlist = []
    if excluded:
        for node in excluded:
            ctx = repo[node]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(node)

    if not secretlist:
        ui.status(_(b"no changes found\n"))
    else:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
167
167
168
168
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as exc:
        if exc.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(exc.locker)
            )
        else:
            reason = _(b'lock held by %r') % exc.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (exc.desc or stringutil.forcebytestr(exc.filename), reason)
        )
        if not exc.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as exc:
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                exc.desc or stringutil.forcebytestr(exc.filename),
                encoding.strtolocal(exc.strerror),
            )
        )
    except error.OutOfBandError as exc:
        if exc.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if exc.args:
            ui.error(b''.join(exc.args))
        if exc.hint:
            ui.error(b'(%s)\n' % exc.hint)
    except error.RepoError as exc:
        ui.error(_(b"abort: %s!\n") % exc)
        if exc.hint:
            ui.error(_(b"(%s)\n") % exc.hint)
    except error.ResponseError as exc:
        ui.error(_(b"abort: %s") % exc.args[0])
        msg = exc.args[1]
        # On py3 the payload may arrive as unicode; normalize to bytes
        # before deciding how to render it.
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as exc:
        ui.error(_(b"abort: file censored %s!\n") % exc)
    except error.StorageError as exc:
        ui.error(_(b"abort: %s!\n") % exc)
        if exc.hint:
            ui.error(_(b"(%s)\n") % exc.hint)
    except error.InterventionRequired as exc:
        ui.error(b"%s\n" % exc)
        if exc.hint:
            ui.error(_(b"(%s)\n") % exc.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as exc:
        ui.error(_(b"abort: %s\n") % exc)
        if exc.hint:
            ui.error(_(b"(%s)\n") % exc.hint)
    except ImportError as exc:
        ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(exc))
        # The failing module name is the last word of the message; use it
        # to give a more targeted hint for known C extensions.
        modname = stringutil.forcebytestr(exc).split()[-1]
        if modname in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif modname in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except (IOError, OSError) as exc:
        if util.safehasattr(exc, b"code"):  # HTTPError
            ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(exc))
        elif util.safehasattr(exc, b"reason"):  # URLError or SSLError
            try:  # usually it is in the form (errno, strerror)
                reason = exc.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = exc.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_(b"abort: error: %s\n") % reason)
        elif (
            util.safehasattr(exc, b"args")
            and exc.args
            and exc.args[0] == errno.EPIPE
        ):
            # A broken pipe is an expected way for output to stop; stay
            # silent.
            pass
        elif getattr(exc, "strerror", None):  # common IOError or OSError
            if getattr(exc, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(exc.strerror),
                        stringutil.forcebytestr(exc.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(exc.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as exc:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        return exc.code

    return -1
293
293
294
294
def checknewlabel(repo, lbl, kind):
    # The "kind" parameter is deliberately kept out of ui output.
    # It makes strings difficult to translate.
    reserved = [b'tip', b'.', b'null']
    if lbl in reserved:
        raise error.Abort(_(b"the name '%s' is reserved") % lbl)
    for ch in (b':', b'\0', b'\n', b'\r'):
        if ch in lbl:
            raise error.Abort(
                _(b"%r cannot be used in a name") % pycompat.bytestr(ch)
            )
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # A purely numeric name would be ambiguous with a revnum.
        raise error.Abort(_(b"cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
312
312
313
313
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # Embedded line breaks would break the on-disk encodings that store
    # one filename per line.
    if any(ch in f for ch in (b'\r', b'\n')):
        raise error.Abort(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )
321
321
322
322
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = b"%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_(b"warning: %s\n") % msg)
334
334
335
335
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    raw = ui.config(b'ui', b'portablefilenames')
    lowered = raw.lower()
    asbool = stringutil.parsebool(raw)
    # Windows always aborts; elsewhere only an explicit 'abort' does.
    abort = pycompat.iswindows or lowered == b'abort'
    warn = asbool or lowered == b'warn'
    recognized = warn or abort or lowered == b'ignore'
    if asbool is None and not recognized:
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % raw
        )
    return abort, warn
349
349
350
350
class casecollisionauditor(object):
    '''Detect new filenames that would case-fold-collide with tracked ones,
    warning or aborting depending on configuration.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # Lower-case every tracked filename in one pass: join on NUL,
        # fold case once, and split back apart.
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # _newfiles exists so that calling this object twice with the
        # same filename doesn't produce a duplicate complaint.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
374
374
375
375
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    relevant = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not relevant:
        # No filtered rev at or below maxrev: same answer as "no key".
        return None
    digest = hashlib.sha1()
    for rev in relevant:
        digest.update(b'%d;' % rev)
    return digest.digest()
399
399
400
400
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        # Errors on the root path are fatal; anything deeper is skipped.
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            # Remember dirname's stat and report whether it was new;
            # revisiting a known directory means a symlink cycle.
            dirstat = os.stat(dirname)
            if any(samestat(dirstat, seen) for seen in dirlst):
                return False
            dirlst.append(dirstat)
            return True

    else:
        # Without samestat we cannot detect cycles safely.
        followsym = False

    if followsym and seen_dirs is None:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            kept = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        kept.append(d)
            dirs[:] = kept
448
448
449
449
def binnode(ctx):
    """Return binary node id for a given basectx"""
    # The working directory has no real node; substitute the sentinel.
    node = ctx.node()
    return wdirid if node is None else node
456
456
457
457
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    # The working directory has no revnum; substitute the sentinel.
    rev = ctx.rev()
    return wdirrev if rev is None else rev
465
465
466
466
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    # Delegate to formatrevnode with sentinel-safe rev/node values.
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
472
472
473
473
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Debug mode shows the full 40-char hash, otherwise the short form.
    hexfunc = hex if ui.debugflag else short
    return b'%d:%s' % (rev, hexfunc(node))
481
481
482
482
def resolvehexnodeidprefix(repo, prefix):
    # An 'x' prefix marks "this is a hash, not a revnum" when the
    # experimental prefixhexnode mode is on; strip it before matching.
    if prefix.startswith(b'x') and repo.ui.configbool(
        b'experimental', b'revisions.prefixhexnode'
    ):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous/
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if not revset:
            raise
        # Clear config to avoid infinite recursion
        configoverrides = {
            (b'experimental', b'revisions.disambiguatewithin'): None
        }
        with repo.ui.configoverride(configoverrides):
            revs = repo.anyrevs([revset], user=True)
        matches = []
        for rev in revs:
            candidate = repo.changelog.node(rev)
            if hex(candidate).startswith(prefix):
                matches.append(candidate)
        if len(matches) == 1:
            return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node
515
515
516
516
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # A leading zero never parses as a revnum (except '0' itself,
        # which *is* a valid revnum and must stay ambiguous), and a value
        # at or beyond the tip rev cannot be one either.
        if prefix != b'0' and prefix[0:1] == b'0':
            return False
        return i < len(repo)
    except ValueError:
        # Not numeric at all, so it cannot be a revnum.
        return False
530
530
531
531
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    # a zero-length prefix is never meaningful
    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            # with this option, a revnum-looking prefix is marked with a
            # leading 'x' instead of being lengthened
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        # otherwise keep extending the prefix until it can no longer be
        # mistaken for a revision number
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        # if the node is inside the configured revset, disambiguate only
        # against that subset of revisions (cached across calls)
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            # prefer the native (C) nodetree for shortest-prefix queries
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # pure-Python fallback: grow the prefix until it matches exactly
            # one node inside the revset
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    # default path: ask the changelog for the shortest unique prefix
    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
602
602
603
603
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
615
615
616
616
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    # guard against a common py3 mistake: passing a str instead of bytes
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        # well-known symbolic names resolve directly
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        # next, try the symbol as a (possibly negative) revision number;
        # the round-trip through b'%d' rejects forms like b'010' or b'+1'
        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            # a filtered rev is a distinct error, not "not a number"
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # a 40-char symbol may be a full binary node id
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # finally, try the symbol as a unique hex node id prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        # the working directory pseudo-revision
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        # rewrap filtered errors with a user-oriented explanation
        raise _filterederror(repo, symbol)
682
682
683
683
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith(b'visible'):
        # generic message for non-"visible" filters
        msg = _(b"filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # The "visible" filters hide obsolete changesets; look the changeset up
    # in the unfiltered repo so we can tell the user *why* it is hidden.
    unfilteredrepo = repo.unfiltered()
    ctx = revsymbol(unfilteredrepo, changeid)

    if ctx.obsolete():
        # enrich the message with the reason the changeset is not visible
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _(b"hidden revision '%s'") % changeid

    hint = _(b'use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
708
708
709
709
def revsingle(repo, revspec, default=b'.', localalias=None):
    """Resolve one revspec to a changectx, falling back to ``default``.

    An empty revspec (other than the integer 0) yields ``repo[default]``.
    When the revset matches several revisions the last one wins; an empty
    result set aborts.
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_(b'empty revision set'))
    return repo[matched.last()]
718
718
719
719
def _pairspec(revspec):
    """Report whether ``revspec`` parses to a top-level range expression."""
    rangeops = (b'range', b'rangepre', b'rangepost', b'rangeall')
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in rangeops
728
728
729
729
def revpair(repo, revs):
    """Resolve a list of revspecs to a (first, second) pair of contexts.

    With no specs, pairs the working directory with its parent.  A single
    non-range spec pairs the revision with the working directory; a range
    expression always yields a true pair.
    """
    if not revs:
        return repo[b'.'], repo[None]

    resolved = revrange(repo, revs)
    if not resolved:
        raise error.Abort(_(b'empty revision range'))

    first = resolved.first()
    second = resolved.last()

    if first == second and len(revs) >= 2:
        # several specs collapsed to a single revision: abort if any spec
        # was empty on its own
        if not all(revrange(repo, [r]) for r in revs):
            raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
754
754
755
755
def revrange(repo, specs, localalias=None):
    """Execute one or more revsets and return their union as a smartset.

    This is the preferred way to run user-supplied revsets, since it honors
    user configuration such as revset aliases.

    ``specs`` may mix formatted revset strings and bare integers (taken as
    revision numbers); the results are combined with a chained ``OR``.  An
    empty ``specs`` yields an empty result.  Arguments that need expansion
    should be pre-formatted with ``revsetlang.formatspec()`` before being
    passed here.

    Returns a ``revset.abstractsmartset``, a list-like interface over
    integer revisions.
    """
    formatted = [
        revsetlang.formatspec(b'%d', spec) if isinstance(spec, int) else spec
        for spec in specs
    ]
    return repo.anyrevs(formatted, user=True, localalias=localalias)
783
783
784
784
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # merge changeset: both parents always matter
        return parents
    if repo.ui.debugflag:
        # debug output shows the full parent list, padded with the null rev
        return [parents[0], repo[nullrev]]
    # linear history: omit a parent that is just the preceding revision
    return [] if parents[0].rev() >= intrev(ctx) - 1 else parents
800
800
801
801
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function mapping repo-relative paths to UI-presentable ones.

    Whether the produced paths are cwd-relative or repo-relative is driven
    by the ui.relative-paths option.  ``legacyrelativevalue`` is used when
    that option is set to 'legacy'; ``forcerelativevalue``, when not None,
    overrides the option entirely.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        # bind cwd and pathto once so the returned closure is cheap
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        # repo-relative paths already use forward slashes
        return lambda f: f
    return util.localpath
837
837
838
838
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''

    def prefixed(f):
        # prepend the subrepo path before delegating to the outer uipathfn
        return uipathfn(posixpath.join(subpath, f))

    return prefixed
842
842
843
843
def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    if pats:
        return True
    return bool(opts.get(b'include') or opts.get(b'exclude'))
851
851
852
852
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # an explicit pattern kind (glob:, re:, ...) is never expanded
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            # a pattern glob cannot parse is kept verbatim
            globbed = [pat]
        # fall back to the original pattern when the glob matched nothing
        expanded.extend(globbed if globbed else [kindpat])
    return expanded
871
871
872
872
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    opts = opts or {}
    if default == b'relpath' and not globbed:
        # on windows, expand bare globs before matching
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def warnbad(f, msg):
        # default bad-file handler: report on the ui with a UI-relative path
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn if badfn is not None else warnbad,
    )

    if m.always():
        # an always-matcher means no effective pattern was given
        pats = []
    return m, pats
904
904
905
905
def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    # delegate to matchandpats() and discard the normalized pattern list
    matcher, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return matcher
911
911
912
912
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # repo is unused here; the parameter exists for signature parity with
    # the other match* helpers in this module
    return matchmod.always()
916
916
917
917
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # repo is unused; badfn is forwarded to the exact matcher for reporting
    # bad paths (see matchmod.exact)
    return matchmod.exact(files, badfn=badfn)
921
921
922
922
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # a plain path only needs canonicalization
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # a real pattern must match exactly one file in the given revision
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    matched = [f for f in ctx if m(f)]
    if len(matched) != 1:
        raise error.ParseError(msg)
    return matched[0]
936
936
937
937
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if origbackuppath:
        return vfs.vfs(repo.wvfs.join(origbackuppath))
    return None
946
946
947
947
def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # no dedicated backup directory configured: back up next to the file
        return repo.wjoin(filepath + b".orig")

    # announce directory creation before any cleanup happens below
    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

    # Remove any files that conflict with the backup file's path
    for f in reversed(list(util.finddirs(filepath))):
        if origvfs.isfileorlink(f):
            ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
            origvfs.unlink(f)
            # only the deepest conflicting ancestor needs removing
            break

    origvfs.makedirs(origbackupdir)

    # a directory occupying the backup file's own path must also go
    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
982
982
983
983
984 class _containsnode(object):
984 class _containsnode(object):
985 """proxy __contains__(node) to container.__contains__ which accepts revs"""
985 """proxy __contains__(node) to container.__contains__ which accepts revs"""
986
986
987 def __init__(self, repo, revcontainer):
987 def __init__(self, repo, revcontainer):
988 self._torev = repo.changelog.rev
988 self._torev = repo.changelog.rev
989 self._revcontains = revcontainer.__contains__
989 self._revcontains = revcontainer.__contains__
990
990
991 def __contains__(self, node):
991 def __contains__(self, node):
992 return self._revcontains(self._torev(node))
992 return self._revcontains(self._torev(node))
993
993
994
994
995 def cleanupnodes(
995 def cleanupnodes(
996 repo,
996 repo,
997 replacements,
997 replacements,
998 operation,
998 operation,
999 moves=None,
999 moves=None,
1000 metadata=None,
1000 metadata=None,
1001 fixphase=False,
1001 fixphase=False,
1002 targetphase=None,
1002 targetphase=None,
1003 backup=True,
1003 backup=True,
1004 ):
1004 ):
1005 """do common cleanups when old nodes are replaced by new nodes
1005 """do common cleanups when old nodes are replaced by new nodes
1006
1006
1007 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1007 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1008 (we might also want to move working directory parent in the future)
1008 (we might also want to move working directory parent in the future)
1009
1009
1010 By default, bookmark moves are calculated automatically from 'replacements',
1010 By default, bookmark moves are calculated automatically from 'replacements',
1011 but 'moves' can be used to override that. Also, 'moves' may include
1011 but 'moves' can be used to override that. Also, 'moves' may include
1012 additional bookmark moves that should not have associated obsmarkers.
1012 additional bookmark moves that should not have associated obsmarkers.
1013
1013
1014 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
1014 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
1015 have replacements. operation is a string, like "rebase".
1015 have replacements. operation is a string, like "rebase".
1016
1016
1017 metadata is dictionary containing metadata to be stored in obsmarker if
1017 metadata is dictionary containing metadata to be stored in obsmarker if
1018 obsolescence is enabled.
1018 obsolescence is enabled.
1019 """
1019 """
1020 assert fixphase or targetphase is None
1020 assert fixphase or targetphase is None
1021 if not replacements and not moves:
1021 if not replacements and not moves:
1022 return
1022 return
1023
1023
1024 # translate mapping's other forms
1024 # translate mapping's other forms
1025 if not util.safehasattr(replacements, b'items'):
1025 if not util.safehasattr(replacements, b'items'):
1026 replacements = {(n,): () for n in replacements}
1026 replacements = {(n,): () for n in replacements}
1027 else:
1027 else:
1028 # upgrading non tuple "source" to tuple ones for BC
1028 # upgrading non tuple "source" to tuple ones for BC
1029 repls = {}
1029 repls = {}
1030 for key, value in replacements.items():
1030 for key, value in replacements.items():
1031 if not isinstance(key, tuple):
1031 if not isinstance(key, tuple):
1032 key = (key,)
1032 key = (key,)
1033 repls[key] = value
1033 repls[key] = value
1034 replacements = repls
1034 replacements = repls
1035
1035
1036 # Unfiltered repo is needed since nodes in replacements might be hidden.
1036 # Unfiltered repo is needed since nodes in replacements might be hidden.
1037 unfi = repo.unfiltered()
1037 unfi = repo.unfiltered()
1038
1038
1039 # Calculate bookmark movements
1039 # Calculate bookmark movements
1040 if moves is None:
1040 if moves is None:
1041 moves = {}
1041 moves = {}
1042 for oldnodes, newnodes in replacements.items():
1042 for oldnodes, newnodes in replacements.items():
1043 for oldnode in oldnodes:
1043 for oldnode in oldnodes:
1044 if oldnode in moves:
1044 if oldnode in moves:
1045 continue
1045 continue
1046 if len(newnodes) > 1:
1046 if len(newnodes) > 1:
1047 # usually a split, take the one with biggest rev number
1047 # usually a split, take the one with biggest rev number
1048 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1048 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1049 elif len(newnodes) == 0:
1049 elif len(newnodes) == 0:
1050 # move bookmark backwards
1050 # move bookmark backwards
1051 allreplaced = []
1051 allreplaced = []
1052 for rep in replacements:
1052 for rep in replacements:
1053 allreplaced.extend(rep)
1053 allreplaced.extend(rep)
1054 roots = list(
1054 roots = list(
1055 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1055 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1056 )
1056 )
1057 if roots:
1057 if roots:
1058 newnode = roots[0].node()
1058 newnode = roots[0].node()
1059 else:
1059 else:
1060 newnode = nullid
1060 newnode = nullid
1061 else:
1061 else:
1062 newnode = newnodes[0]
1062 newnode = newnodes[0]
1063 moves[oldnode] = newnode
1063 moves[oldnode] = newnode
1064
1064
1065 allnewnodes = [n for ns in replacements.values() for n in ns]
1065 allnewnodes = [n for ns in replacements.values() for n in ns]
1066 toretract = {}
1066 toretract = {}
1067 toadvance = {}
1067 toadvance = {}
1068 if fixphase:
1068 if fixphase:
1069 precursors = {}
1069 precursors = {}
1070 for oldnodes, newnodes in replacements.items():
1070 for oldnodes, newnodes in replacements.items():
1071 for oldnode in oldnodes:
1071 for oldnode in oldnodes:
1072 for newnode in newnodes:
1072 for newnode in newnodes:
1073 precursors.setdefault(newnode, []).append(oldnode)
1073 precursors.setdefault(newnode, []).append(oldnode)
1074
1074
1075 allnewnodes.sort(key=lambda n: unfi[n].rev())
1075 allnewnodes.sort(key=lambda n: unfi[n].rev())
1076 newphases = {}
1076 newphases = {}
1077
1077
1078 def phase(ctx):
1078 def phase(ctx):
1079 return newphases.get(ctx.node(), ctx.phase())
1079 return newphases.get(ctx.node(), ctx.phase())
1080
1080
1081 for newnode in allnewnodes:
1081 for newnode in allnewnodes:
1082 ctx = unfi[newnode]
1082 ctx = unfi[newnode]
1083 parentphase = max(phase(p) for p in ctx.parents())
1083 parentphase = max(phase(p) for p in ctx.parents())
1084 if targetphase is None:
1084 if targetphase is None:
1085 oldphase = max(
1085 oldphase = max(
1086 unfi[oldnode].phase() for oldnode in precursors[newnode]
1086 unfi[oldnode].phase() for oldnode in precursors[newnode]
1087 )
1087 )
1088 newphase = max(oldphase, parentphase)
1088 newphase = max(oldphase, parentphase)
1089 else:
1089 else:
1090 newphase = max(targetphase, parentphase)
1090 newphase = max(targetphase, parentphase)
1091 newphases[newnode] = newphase
1091 newphases[newnode] = newphase
1092 if newphase > ctx.phase():
1092 if newphase > ctx.phase():
1093 toretract.setdefault(newphase, []).append(newnode)
1093 toretract.setdefault(newphase, []).append(newnode)
1094 elif newphase < ctx.phase():
1094 elif newphase < ctx.phase():
1095 toadvance.setdefault(newphase, []).append(newnode)
1095 toadvance.setdefault(newphase, []).append(newnode)
1096
1096
1097 with repo.transaction(b'cleanup') as tr:
1097 with repo.transaction(b'cleanup') as tr:
1098 # Move bookmarks
1098 # Move bookmarks
1099 bmarks = repo._bookmarks
1099 bmarks = repo._bookmarks
1100 bmarkchanges = []
1100 bmarkchanges = []
1101 for oldnode, newnode in moves.items():
1101 for oldnode, newnode in moves.items():
1102 oldbmarks = repo.nodebookmarks(oldnode)
1102 oldbmarks = repo.nodebookmarks(oldnode)
1103 if not oldbmarks:
1103 if not oldbmarks:
1104 continue
1104 continue
1105 from . import bookmarks # avoid import cycle
1105 from . import bookmarks # avoid import cycle
1106
1106
1107 repo.ui.debug(
1107 repo.ui.debug(
1108 b'moving bookmarks %r from %s to %s\n'
1108 b'moving bookmarks %r from %s to %s\n'
1109 % (
1109 % (
1110 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1110 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1111 hex(oldnode),
1111 hex(oldnode),
1112 hex(newnode),
1112 hex(newnode),
1113 )
1113 )
1114 )
1114 )
1115 # Delete divergent bookmarks being parents of related newnodes
1115 # Delete divergent bookmarks being parents of related newnodes
1116 deleterevs = repo.revs(
1116 deleterevs = repo.revs(
1117 b'parents(roots(%ln & (::%n))) - parents(%n)',
1117 b'parents(roots(%ln & (::%n))) - parents(%n)',
1118 allnewnodes,
1118 allnewnodes,
1119 newnode,
1119 newnode,
1120 oldnode,
1120 oldnode,
1121 )
1121 )
1122 deletenodes = _containsnode(repo, deleterevs)
1122 deletenodes = _containsnode(repo, deleterevs)
1123 for name in oldbmarks:
1123 for name in oldbmarks:
1124 bmarkchanges.append((name, newnode))
1124 bmarkchanges.append((name, newnode))
1125 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1125 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1126 bmarkchanges.append((b, None))
1126 bmarkchanges.append((b, None))
1127
1127
1128 if bmarkchanges:
1128 if bmarkchanges:
1129 bmarks.applychanges(repo, tr, bmarkchanges)
1129 bmarks.applychanges(repo, tr, bmarkchanges)
1130
1130
1131 for phase, nodes in toretract.items():
1131 for phase, nodes in toretract.items():
1132 phases.retractboundary(repo, tr, phase, nodes)
1132 phases.retractboundary(repo, tr, phase, nodes)
1133 for phase, nodes in toadvance.items():
1133 for phase, nodes in toadvance.items():
1134 phases.advanceboundary(repo, tr, phase, nodes)
1134 phases.advanceboundary(repo, tr, phase, nodes)
1135
1135
1136 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1136 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1137 # Obsolete or strip nodes
1137 # Obsolete or strip nodes
1138 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1138 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1139 # If a node is already obsoleted, and we want to obsolete it
1139 # If a node is already obsoleted, and we want to obsolete it
1140 # without a successor, skip that obssolete request since it's
1140 # without a successor, skip that obssolete request since it's
1141 # unnecessary. That's the "if s or not isobs(n)" check below.
1141 # unnecessary. That's the "if s or not isobs(n)" check below.
1142 # Also sort the node in topology order, that might be useful for
1142 # Also sort the node in topology order, that might be useful for
1143 # some obsstore logic.
1143 # some obsstore logic.
1144 # NOTE: the sorting might belong to createmarkers.
1144 # NOTE: the sorting might belong to createmarkers.
1145 torev = unfi.changelog.rev
1145 torev = unfi.changelog.rev
1146 sortfunc = lambda ns: torev(ns[0][0])
1146 sortfunc = lambda ns: torev(ns[0][0])
1147 rels = []
1147 rels = []
1148 for ns, s in sorted(replacements.items(), key=sortfunc):
1148 for ns, s in sorted(replacements.items(), key=sortfunc):
1149 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1149 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1150 rels.append(rel)
1150 rels.append(rel)
1151 if rels:
1151 if rels:
1152 obsolete.createmarkers(
1152 obsolete.createmarkers(
1153 repo, rels, operation=operation, metadata=metadata
1153 repo, rels, operation=operation, metadata=metadata
1154 )
1154 )
1155 elif phases.supportinternal(repo) and mayusearchived:
1155 elif phases.supportinternal(repo) and mayusearchived:
1156 # this assume we do not have "unstable" nodes above the cleaned ones
1156 # this assume we do not have "unstable" nodes above the cleaned ones
1157 allreplaced = set()
1157 allreplaced = set()
1158 for ns in replacements.keys():
1158 for ns in replacements.keys():
1159 allreplaced.update(ns)
1159 allreplaced.update(ns)
1160 if backup:
1160 if backup:
1161 from . import repair # avoid import cycle
1161 from . import repair # avoid import cycle
1162
1162
1163 node = min(allreplaced, key=repo.changelog.rev)
1163 node = min(allreplaced, key=repo.changelog.rev)
1164 repair.backupbundle(
1164 repair.backupbundle(
1165 repo, allreplaced, allreplaced, node, operation
1165 repo, allreplaced, allreplaced, node, operation
1166 )
1166 )
1167 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1167 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1168 else:
1168 else:
1169 from . import repair # avoid import cycle
1169 from . import repair # avoid import cycle
1170
1170
1171 tostrip = list(n for ns in replacements for n in ns)
1171 tostrip = list(n for ns in replacements for n in ns)
1172 if tostrip:
1172 if tostrip:
1173 repair.delayedstrip(
1173 repair.delayedstrip(
1174 repo.ui, repo, tostrip, operation, backup=backup
1174 repo.ui, repo, tostrip, operation, backup=backup
1175 )
1175 )
1176
1176
1177
1177
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    """Schedule addition of unknown files and removal of missing ones.

    ``matcher`` selects the files considered.  ``prefix`` is the path
    prefix used when recursing into subrepositories and ``uipathfn``
    renders paths for user-facing messages.  Recognized ``opts`` keys
    (bytes): ``dry_run``, ``similarity`` (0-100 rename-detection
    threshold) and ``subrepos``.

    Returns 1 if any file explicitly listed by the matcher was rejected
    by the walk or a subrepository reported a problem, otherwise 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    # Validate similarity and normalize it from a 0-100 percentage to the
    # 0.0-1.0 fraction passed down to _findrenames().
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    # Recurse into subrepositories the matcher reaches (or into all of
    # them when the 'subrepos' option is set).
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        # Remember every rejected file; only forward the bad-file message
        # for files the user named explicitly.
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    # Report what will be added (unknown/forgotten files) and removed
    # (deleted files), skipping explicitly-listed files unless verbose.
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1245
1245
1246
1246
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # The badfn lambda closes over ``rejected``; the list is bound on the
    # next line, before the matcher can ever invoke the callback.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    # In verbose mode, report what will be added (unknown/forgotten) and
    # what will be removed (deleted).
    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    # Unlike addremove(), there is no dry-run mode here: always record.
    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
1280
1280
1281
1281
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a tuple of lists: (added, unknown, deleted, removed, forgotten).
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    # Path auditor used below to filter unknown files (audit_path.check).
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    # Classify each walked file by its dirstate status character
    # ('?' untracked, 'r' removed, 'a' added) combined with whether the
    # file actually exists on disk (``st`` is its stat result).
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            # tracked but gone from disk
            deleted.append(abs)
        elif dstate == b'r' and st:
            # marked removed but still present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1316
1316
1317
1317
1318 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1318 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1319 '''Find renames from removed files to added ones.'''
1319 '''Find renames from removed files to added ones.'''
1320 renames = {}
1320 renames = {}
1321 if similarity > 0:
1321 if similarity > 0:
1322 for old, new, score in similar.findrenames(
1322 for old, new, score in similar.findrenames(
1323 repo, added, removed, similarity
1323 repo, added, removed, similarity
1324 ):
1324 ):
1325 if (
1325 if (
1326 repo.ui.verbose
1326 repo.ui.verbose
1327 or not matcher.exact(old)
1327 or not matcher.exact(old)
1328 or not matcher.exact(new)
1328 or not matcher.exact(new)
1329 ):
1329 ):
1330 repo.ui.status(
1330 repo.ui.status(
1331 _(
1331 _(
1332 b'recording removal of %s as rename to %s '
1332 b'recording removal of %s as rename to %s '
1333 b'(%d%% similar)\n'
1333 b'(%d%% similar)\n'
1334 )
1334 )
1335 % (uipathfn(old), uipathfn(new), score * 100)
1335 % (uipathfn(old), uipathfn(new), score * 100)
1336 )
1336 )
1337 renames[new] = old
1337 renames[new] = old
1338 return renames
1338 return renames
1339
1339
1340
1340
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.

    ``renames`` maps new name -> old name.  All updates happen under the
    repo's working-copy lock.
    '''
    wctx = repo[None]
    with repo.wlock():
        # Drop vanished files first, then register the new ones.
        wctx.forget(deleted)
        wctx.add(unknown)
        for newname, oldname in pycompat.iteritems(renames):
            wctx.copy(oldname, newname)
1350
1350
1351
1351
def getrenamedfn(repo, endrev=None):
    """Return a function ``getrenamed(fn, rev)`` that yields the rename
    source of file ``fn`` in revision ``rev``, or None if not renamed.

    When the repo uses changeset-centric copy data, the returned function
    reads p1/p2 copies straight from the changeset.  Otherwise it scans
    the filelog once per file, caching linkrev -> rename info up to
    ``endrev`` (defaults to the full repo length).
    """
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            # Copy metadata lives in the changeset: check p1 copies first,
            # then p2 copies.
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    # rcache maps filename -> {linkrev: rename source or False}
    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            # First query for this file: scan its filelog once and record
            # each revision's rename info keyed by linkrev.
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
1396
1396
1397
1397
def getcopiesfn(repo, endrev=None):
    """Return a function mapping a changectx to its list of
    ``(destination, source)`` copy pairs.

    With changeset-centric copy data the pairs come sorted straight from
    the changeset; otherwise each touched file is probed through the
    filelog-based ``getrenamedfn`` helper (bounded by ``endrev``).
    """
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            # Copy metadata is stored on the changeset itself.
            p2copies = ctx.p2copies()
            if not p2copies:
                return sorted(ctx.p1copies().items())
            merged = ctx.p1copies().copy()
            # There should be no overlap between p1 and p2 copies.
            merged.update(p2copies)
            return sorted(merged.items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            # Probe every file touched by ctx for a rename source.
            pairs = ((fn, getrenamed(fn, ctx.rev())) for fn in ctx.files())
            return [(fn, src) for fn, src in pairs if src]

    return copiesfn
1422
1422
1423
1423
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.

    ``cwd`` is only used when rendering paths in the warning message.
    """
    # Follow an existing copy record so chained copies point back at the
    # original source.
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            # dst is not in a merged/normal state: make it tracked normally
            # instead of recording a self-copy.
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            # The source was only added (never committed), so there is no
            # revision to record copy metadata against: warn and just add
            # dst if it is untracked or marked removed.
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1446
1446
1447
1447
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    # Reparent the dirstate onto newctx first, then patch up the entries.
    ds.setparents(newctx.node(), nullid)
    copies = dict(ds.copies())
    # Differences between the old parent and newctx tell us which entries
    # need their dirstate status adjusted.
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # Chain copies: if dst was copied from src and src itself has a
    # recorded source, point dst at the original source instead.
    copies = dict(
        (dst, oldcopies.get(src, src))
        for dst, src in pycompat.iteritems(oldcopies)
    )
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        # Keep copy info only if src exists in newctx and dst is a
        # dirstate-added file not already present in newctx.
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
1492
1492
1493
1493
def writerequires(opener, requirements):
    """Write the ``requires`` file via ``opener``, one requirement per
    line in sorted order, using an atomic temp file."""
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for entry in sorted(requirements):
            fp.write(entry + b"\n")
1498
1498
1499
1499
class filecachesubentry(object):
    """Stat-based change detection for a single file path.

    Holds the last observed ``util.cachestat`` of ``path`` and answers
    ``changed()`` by re-stat()ing and comparing.  Some files may not be
    reliably cacheable (as reported by ``cachestat.cacheable()``); those
    are always considered changed.
    """

    def __init__(self, path, stat):
        # path: the file to watch; stat: whether to stat it immediately
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        """Re-stat the file so later changed() calls compare against now."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether stat data can be trusted for this path.

        Returns True when we don't know yet (optimistic default).
        """
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """True if the file changed since the last stat; updates the
        cached stat as a side effect when a change is seen."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        """Return util.cachestat(path), or None if the file is missing.

        Any OSError other than ENOENT is propagated.
        """
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1554
1554
1555
1555
class filecacheentry(object):
    """Aggregate of ``filecachesubentry`` objects, one per watched path."""

    def __init__(self, paths, stat=True):
        # One sub-entry per path; ``stat`` controls whether each file is
        # stat()ed right away.
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        """Re-stat every watched file."""
        for sub in self._entries:
            sub.refresh()
1572
1572
1573
1573
1574 class filecache(object):
1574 class filecache(object):
1575 """A property like decorator that tracks files under .hg/ for updates.
1575 """A property like decorator that tracks files under .hg/ for updates.
1576
1576
1577 On first access, the files defined as arguments are stat()ed and the
1577 On first access, the files defined as arguments are stat()ed and the
1578 results cached. The decorated function is called. The results are stashed
1578 results cached. The decorated function is called. The results are stashed
1579 away in a ``_filecache`` dict on the object whose method is decorated.
1579 away in a ``_filecache`` dict on the object whose method is decorated.
1580
1580
1581 On subsequent access, the cached result is used as it is set to the
1581 On subsequent access, the cached result is used as it is set to the
1582 instance dictionary.
1582 instance dictionary.
1583
1583
1584 On external property set/delete operations, the caller must update the
1584 On external property set/delete operations, the caller must update the
1585 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1585 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1586 instead of directly setting <attr>.
1586 instead of directly setting <attr>.
1587
1587
1588 When using the property API, the cached data is always used if available.
1588 When using the property API, the cached data is always used if available.
1589 No stat() is performed to check if the file has changed.
1589 No stat() is performed to check if the file has changed.
1590
1590
1591 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1591 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1592 can populate an entry before the property's getter is called. In this case,
1592 can populate an entry before the property's getter is called. In this case,
1593 entries in ``_filecache`` will be used during property operations,
1593 entries in ``_filecache`` will be used during property operations,
1594 if available. If the underlying file changes, it is up to external callers
1594 if available. If the underlying file changes, it is up to external callers
1595 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1595 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1596 method result as well as possibly calling ``del obj._filecache[attr]`` to
1596 method result as well as possibly calling ``del obj._filecache[attr]`` to
1597 remove the ``filecacheentry``.
1597 remove the ``filecacheentry``.
1598 """
1598 """
1599
1599
1600 def __init__(self, *paths):
1600 def __init__(self, *paths):
1601 self.paths = paths
1601 self.paths = paths
1602
1602
    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        # Abstract by design: the base class cannot know which vfs/join the
        # decorated object uses.
        raise NotImplementedError
1611
1611
    def __call__(self, func):
        # Decorator entry point: remember the wrapped function and both name
        # forms — the native-str name (used as the obj.__dict__ attribute)
        # and the bytes name (used as the key in obj._filecache).
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self
1617
1617
    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        # Once the value is stored in obj.__dict__ it shadows this non-data
        # descriptor, so __get__ must not run again while the attribute is
        # cached.
        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            # re-run the getter only if the backing file's stat data changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # cache in the instance dict; subsequent reads bypass this descriptor
        obj.__dict__[self.sname] = entry.obj
        return entry.obj
1642
1642
    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        """Update the cached value and its ``_filecache`` entry in lockstep.

        Callers must use ``__class__.<attr>.set(obj, value)`` instead of a
        plain attribute assignment so that ``obj._filecache`` stays
        consistent with ``obj.__dict__``.
        """
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
1658
1658
1659
1659
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        # Drain and close the subprocess pipe even on parse errors, so the
        # child cannot block on a full stdout buffer.
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    # 'cmd' is always bound here: proc is only set on the shell: branch.
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
1726
1726
1727
1727
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run 'cmd' via ui.system with 'envvar' set to the lock inheritance
    token so the child process can inherit 'lock'.

    'lock' must currently be held; otherwise
    LockInheritanceContractViolation is raised.  Returns the exit code of
    the subprocess.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            b'lock can only be inherited while held'
        )
    if environ is None:
        environ = {}
    # lock.inherit() yields the token and suspends our ownership for the
    # duration of the child process.
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1738
1738
1739
1739
def wlocksub(repo, cmd, *args, **kwargs):
    """Spawn 'cmd' as a subprocess that may inherit the repo's wlock.

    Must be called while the wlock is held.  Accepts the same arguments as
    ui.system and returns the exit code of the subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, b'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1749
1749
1750
1750
class progress(object):
    """Progress reporting helper, usable as a context manager.

    Forwards every position change to the 'updatebar' callable as
    (topic, pos, item, unit, total); when progress.debug is enabled the
    same information is also mirrored to ui.debug().
    """

    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Always close out the bar, even when the body raised.
        self.complete()

    def update(self, pos, item=b"", total=None):
        """Move the bar to absolute position 'pos', optionally updating
        'total' and the current 'item' label."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        """Advance the bar by 'step' positions."""
        self.update(self.pos + step, item, total)

    def complete(self):
        """Signal completion: a None position tells the bar to clear."""
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        # BUG FIX: 'unit' must be initialized unconditionally.  The previous
        # code only assigned it inside "if self.unit:", so a progress bar
        # created with the default unit=b"" raised UnboundLocalError as soon
        # as progress.debug was enabled.
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1799
1799
1800
1800
def gdinitconfig(ui):
    """Tell whether a new repository should be created with general delta."""
    # experimental config: format.generaldelta
    if ui.configbool(b'format', b'generaldelta'):
        return True
    return ui.configbool(b'format', b'usegeneraldelta')
1808
1808
1809
1809
def gddeltaconfig(ui):
    """Tell whether incoming deltas should be optimised for general delta."""
    # experimental config: format.generaldelta
    enabled = ui.configbool(b'format', b'generaldelta')
    return enabled
1815
1815
1816
1816
1817 class simplekeyvaluefile(object):
1817 class simplekeyvaluefile(object):
1818 """A simple file with key=value lines
1818 """A simple file with key=value lines
1819
1819
1820 Keys must be alphanumerics and start with a letter, values must not
1820 Keys must be alphanumerics and start with a letter, values must not
1821 contain '\n' characters"""
1821 contain '\n' characters"""
1822
1822
1823 firstlinekey = b'__firstline'
1823 firstlinekey = b'__firstline'
1824
1824
1825 def __init__(self, vfs, path, keys=None):
1825 def __init__(self, vfs, path, keys=None):
1826 self.vfs = vfs
1826 self.vfs = vfs
1827 self.path = path
1827 self.path = path
1828
1828
1829 def read(self, firstlinenonkeyval=False):
1829 def read(self, firstlinenonkeyval=False):
1830 """Read the contents of a simple key-value file
1830 """Read the contents of a simple key-value file
1831
1831
1832 'firstlinenonkeyval' indicates whether the first line of file should
1832 'firstlinenonkeyval' indicates whether the first line of file should
1833 be treated as a key-value pair or reuturned fully under the
1833 be treated as a key-value pair or reuturned fully under the
1834 __firstline key."""
1834 __firstline key."""
1835 lines = self.vfs.readlines(self.path)
1835 lines = self.vfs.readlines(self.path)
1836 d = {}
1836 d = {}
1837 if firstlinenonkeyval:
1837 if firstlinenonkeyval:
1838 if not lines:
1838 if not lines:
1839 e = _(b"empty simplekeyvalue file")
1839 e = _(b"empty simplekeyvalue file")
1840 raise error.CorruptedState(e)
1840 raise error.CorruptedState(e)
1841 # we don't want to include '\n' in the __firstline
1841 # we don't want to include '\n' in the __firstline
1842 d[self.firstlinekey] = lines[0][:-1]
1842 d[self.firstlinekey] = lines[0][:-1]
1843 del lines[0]
1843 del lines[0]
1844
1844
1845 try:
1845 try:
1846 # the 'if line.strip()' part prevents us from failing on empty
1846 # the 'if line.strip()' part prevents us from failing on empty
1847 # lines which only contain '\n' therefore are not skipped
1847 # lines which only contain '\n' therefore are not skipped
1848 # by 'if line'
1848 # by 'if line'
1849 updatedict = dict(
1849 updatedict = dict(
1850 line[:-1].split(b'=', 1) for line in lines if line.strip()
1850 line[:-1].split(b'=', 1) for line in lines if line.strip()
1851 )
1851 )
1852 if self.firstlinekey in updatedict:
1852 if self.firstlinekey in updatedict:
1853 e = _(b"%r can't be used as a key")
1853 e = _(b"%r can't be used as a key")
1854 raise error.CorruptedState(e % self.firstlinekey)
1854 raise error.CorruptedState(e % self.firstlinekey)
1855 d.update(updatedict)
1855 d.update(updatedict)
1856 except ValueError as e:
1856 except ValueError as e:
1857 raise error.CorruptedState(str(e))
1857 raise error.CorruptedState(stringutil.forcebytestr(e))
1858 return d
1858 return d
1859
1859
1860 def write(self, data, firstline=None):
1860 def write(self, data, firstline=None):
1861 """Write key=>value mapping to a file
1861 """Write key=>value mapping to a file
1862 data is a dict. Keys must be alphanumerical and start with a letter.
1862 data is a dict. Keys must be alphanumerical and start with a letter.
1863 Values must not contain newline characters.
1863 Values must not contain newline characters.
1864
1864
1865 If 'firstline' is not None, it is written to file before
1865 If 'firstline' is not None, it is written to file before
1866 everything else, as it is, not in a key=value form"""
1866 everything else, as it is, not in a key=value form"""
1867 lines = []
1867 lines = []
1868 if firstline is not None:
1868 if firstline is not None:
1869 lines.append(b'%s\n' % firstline)
1869 lines.append(b'%s\n' % firstline)
1870
1870
1871 for k, v in data.items():
1871 for k, v in data.items():
1872 if k == self.firstlinekey:
1872 if k == self.firstlinekey:
1873 e = b"key name '%s' is reserved" % self.firstlinekey
1873 e = b"key name '%s' is reserved" % self.firstlinekey
1874 raise error.ProgrammingError(e)
1874 raise error.ProgrammingError(e)
1875 if not k[0:1].isalpha():
1875 if not k[0:1].isalpha():
1876 e = b"keys must start with a letter in a key-value file"
1876 e = b"keys must start with a letter in a key-value file"
1877 raise error.ProgrammingError(e)
1877 raise error.ProgrammingError(e)
1878 if not k.isalnum():
1878 if not k.isalnum():
1879 e = b"invalid key name in a simple key-value file"
1879 e = b"invalid key name in a simple key-value file"
1880 raise error.ProgrammingError(e)
1880 raise error.ProgrammingError(e)
1881 if b'\n' in v:
1881 if b'\n' in v:
1882 e = b"invalid value in a simple key-value file"
1882 e = b"invalid value in a simple key-value file"
1883 raise error.ProgrammingError(e)
1883 raise error.ProgrammingError(e)
1884 lines.append(b"%s=%s\n" % (k, v))
1884 lines.append(b"%s=%s\n" % (k, v))
1885 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1885 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1886 fp.write(b''.join(lines))
1886 fp.write(b''.join(lines))
1887
1887
1888
1888
# transaction names whose post-close summary should report markers
# obsoleted during the transaction
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

# transaction names whose post-close summary should report newly added
# changesets and phase changes
_reportnewcssource = [
    b'pull',
    b'unbundle',
]
1901
1901
1902
1902
def prefetchfiles(repo, revs, match):
    """Run every registered file-prefetch hook so extensions can make the
    files of 'revs' selected by 'match' available locally before the
    command uses them."""
    if not match:
        effective = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        effective = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, effective)
1915
1915
1916
1916
# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1922
1922
1923
1923
def registersummarycallback(repo, otr, txnname=b''):
    """register a callback to issue a summary after the transaction is closed

    'otr' is the transaction to hook; 'txnname' selects which optional
    reports (obsolescence, new changesets, phase changes) are registered.
    """

    def txmatch(sources):
        # does the transaction name start with any of the given sources?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)

        # category names are numbered so reports run in registration order
        newcat = b'%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        # summarize any changegroup applied during the transaction
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                repo.ui.status(_(b'obsoleted %i changesets\n') % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                # count unstable revisions per instability type, excluding
                # filtered (hidden) revisions
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            # snapshot taken now, compared against post-transaction counts
            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            phasetracking = tr.changes.get(b'phases', {})
            if not phasetracking:
                return
            published = [
                rev
                for rev, (old, new) in pycompat.iteritems(phasetracking)
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(
                _(b'%d local changesets published\n') % len(published)
            )
2082
2082
2083
2083
def getinstabilitymessage(delta, instability):
    """Build the warning for 'delta' newly unstable changesets of the given
    instability type; returns None when delta is not positive.

    Exists as a separate function so that extensions can wrap it to show
    more information, like how to fix the instabilities."""
    if delta <= 0:
        return None
    return _(b'%i new %s changesets\n') % (delta, instability)
2091
2091
2092
2092
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Render 'nodes' as space-separated short hashes, truncated to
    'maxnumnodes' entries (with an "and N others" suffix) unless the ui is
    verbose."""
    if repo.ui.verbose or len(nodes) <= maxnumnodes:
        return b' '.join(short(h) for h in nodes)
    shown = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (shown, len(nodes) - maxnumnodes)
2098
2098
2099
2099
def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """Abort when any named branch ends up with more than one head.

    The check is bypassed entirely for 'strip'/'repair' transactions,
    which legitimately create such transient states.
    """
    if desc in (b'strip', b'repair'):
        # history rewriting in progress; skip the single-head enforcement
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to affected branch
    branches = visible.branchmap()
    for branch in branches:
        heads = branches.branchheads(branch, closed=accountclosed)
        if len(heads) < 2:
            continue
        msg = _(b'rejecting multiple heads on branch "%s"') % branch
        hint = _(b'%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
2116
2116
2117
2117
def wrapconvertsink(sink):
    """Hook point around convcmd.convertsink().

    Lets extensions wrap the sink returned by convcmd.convertsink() before
    it is used, whether or not the convert extension was formally loaded.
    The default implementation passes the sink through untouched.
    """
    return sink
2123
2123
2124
2124
def unhidehashlikerevs(repo, specs, hiddentype):
    """Return *repo* with hidden changesets named in *specs* made visible.

    Each spec is parsed as a revset and its hash-like (or revision-number)
    symbols are collected; hidden revisions matching those symbols are then
    dynamically pinned visible.

    hiddentype selects the behaviour:
      1) b'warn':   warn while unhiding changesets
      2) b'nowarn': don't warn while unhiding changesets
    """
    # feature is opt-in and only meaningful on the standard filters
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    hashlike = set()
    for spec in specs:
        try:
            parsed = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue
        hashlike.update(revsetlang.gethashlikesymbols(parsed))

    if not hashlike:
        return repo

    hiddenrevs = _getrevsfromsymbols(repo, hashlike)
    if not hiddenrevs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[r]) for r in hiddenrevs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use new filtername to separate branch/tags cache until we can
    # disbale these cache when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', hiddenrevs)
2173
2173
2174
2174
def _getrevsfromsymbols(repo, symbols):
    """Return the set of hidden revision numbers referenced by *symbols*.

    A symbol may be a plain revision number (only honoured when the
    experimental.directaccess.revnums knob is set) or a hex node id
    prefix; anything that resolves to a revision hidden in *repo* is
    collected.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    revs = set()
    for sym in symbols:
        # first interpretation: a literal revision number
        try:
            num = int(sym)
        except ValueError:
            num = None
        if num is not None and num <= tiprev:
            if not allowrevnums:
                continue
            if num not in cl:
                revs.add(num)
                continue
        # fall through: try the symbol as a hex node id prefix
        try:
            node = resolvehexnodeidprefix(unfi, sym)
        except (error.LookupError, error.WdirUnsupported):
            node = None
        if node is not None:
            rev = unficl.rev(node)
            if rev not in cl:
                revs.add(rev)
    return revs
2208
2208
2209
2209
def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark.

    Excludes ancestors of non-bookmarked heads and of other bookmarks, so
    only the changesets "owned" by *mark* are returned.
    """
    expr = (
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))"
    )
    return repo.revs(expr, mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now