scmutil: add option to register summary callbacks as transaction validators...
Pulkit Goyal
r45032:13da36d7 default
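This change extends the transaction summary machinery so that the callbacks registered through scmutil.registersummarycallback() can also run as validators, i.e. before the transaction commits rather than after it closes. A minimal usage sketch, assuming the option lands as a keyword argument named as_validator (a hypothetical spelling; the truncated commit message above does not show the final API):

    with repo.transaction(b'example') as tr:
        # hypothetical keyword: run the summary callbacks as validators,
        # so a failure can still abort the still-open transaction
        scmutil.registersummarycallback(repo, tr, as_validator=True)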
@@ -1,2202 +1,2214 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    '''Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    '''

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)


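# Illustrative sketch (not in the original file): the seven attribute lists
# unpack in the order yielded by __iter__ above, so callers can write e.g.
#
#     st = status(modified=[b'a.txt'], added=[b'b.txt'])
#     modified, added, removed, deleted, unknown, ignored, clean = st
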
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull. 'excluded' is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, b"code"):  # HTTPError
            ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, b"reason"):  # URLError or SSLError
            try:  # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
        elif (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        return inst.code

    return -1


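# Illustrative call pattern (an assumption, not part of this file): an entry
# point can delegate its error rendering to callcatch() and reuse the
# returned exit code, e.g.
#
#     ret = callcatch(ui, lambda: _runcommand(ui, args))
#
# where _runcommand is a hypothetical callable that may raise Mercurial
# errors such as error.Abort.
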
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.Abort(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.Abort(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.Abort(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)


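# Examples of inputs rejected by checknewlabel() above (illustrative only):
# reserved names (b'tip'), pure integers (b'123'), names containing b':',
# and names with leading or trailing whitespace (b' branch ').
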
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.Abort(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


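# The behaviour of checkportable()/checkportabilityalert() above is driven
# by the ui.portablefilenames config knob, e.g. in an hgrc:
#
#     [ui]
#     portablefilenames = warn   # or: abort, ignore ('warn' is the default)
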
class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


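# Illustrative use of casecollisionauditor above (an assumption, not from
# this diff): commands that add files typically create one auditor and call
# it once per filename, e.g.
#
#     audit = casecollisionauditor(ui, False, repo.dirstate)
#     for f in newfiles:
#         audit(f)  # warns (or aborts) on b'README' vs b'readme'
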
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashutil.sha1()
        for rev in revs:
            s.update(b'%d;' % rev)
        key = s.digest()
    return key


def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


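# Illustrative usage of walkrepos() above (not part of the original file):
#
#     for repopath in walkrepos(b'/srv/repos', followsym=True):
#         print(repopath)  # every directory containing a .hg subdirectory
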
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


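# Illustrative output of formatrevnode() above: at normal verbosity it
# yields the revision number joined to the 12-hex-digit short hash, e.g.
# b'4:1e64e7c3b9ab' (a made-up hash); with --debug it uses the full
# 40-digit hash instead.
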
def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x') and repo.ui.configbool(
        b'experimental', b'revisions.prefixhexnode'
    ):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False


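# Examples for mayberevnum() above (illustrative): b'00' and b'012' are
# never revnums because of the leading zero; b'10' may be a revnum only if
# revision 10 exists (len(repo) > 10); b'0' needs disambiguation in any
# non-empty repository.
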
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its
                        # internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


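# Illustrative calls to revsymbol() above (assumed, not from this diff):
#
#     revsymbol(repo, b'.')          # working directory parent
#     revsymbol(repo, b'tip')        # tip changeset
#     revsymbol(repo, b'1234')       # revision number
#     revsymbol(repo, b'deadbeef')   # node id prefix
#
# Passing a str instead of bytes raises error.ProgrammingError, and revset
# expressions such as b'max(public())' are rejected; use revrange() below
# for those.
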
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted into a function to help extensions (eg: evolve)
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


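# Illustrative behaviour of the helpers above (assumed, not from this diff):
# revsingle(repo, b'tip') returns a single context, while
# revpair(repo, [b'1::3']) returns the (first, last) contexts of the range;
# with no revs at all, revpair falls back to (repo[b'.'], repo[None]).
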
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)


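# Illustrative use of revrange() above with a pre-formatted spec, as its
# docstring recommends (variable names are assumptions, not from this diff):
#
#     spec = revsetlang.formatspec(b'heads(%ld)', somerevs)
#     for rev in revrange(repo, [spec]):
#         ...
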
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    elif repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


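# Illustrative use of getuipathfn() above (an assumption): callers obtain
# the function once and map it over repo-relative paths for display, e.g.
#
#     uipathfn = getuipathfn(repo, legacyrelativevalue=True)
#     ui.write(b'%s\n' % uipathfn(f))  # cwd-relative or repo-relative
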
def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude, were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


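# Illustrative use of the matcher helpers above (assumed, not from this
# diff): build a matcher for command-line patterns and test files with it,
#
#     m = match(repo[None], pats=[b'glob:*.py'])
#     if m(b'setup.py'):
#         ...
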
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable for saving 'orig' files

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)


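# A hedged illustration (editorial note, not part of scmutil itself): with no
# origbackuppath configured, backuppath(ui, repo, b'dir/file') returns
# b'<repo>/dir/file.orig'; with ``[ui] origbackuppath = .hg/origbackups`` it
# returns b'<repo>/.hg/origbackups/dir/file', creating directories and
# clearing conflicting files or links along the way as needed.

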
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


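# Minimal usage sketch (editorial note): wrap a rev container so membership
# can be tested by node instead of by rev:
#
#     revs = repo.revs(b'draft()')
#     containsnode = _containsnode(repo, revs)
#     repo[b'tip'].node() in containsnode  # True iff tip is draft

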
def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non-tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, which might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned
            # ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


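# A hedged sketch (editorial note) of a typical call, with old1/new1 standing
# in for real node ids: a history-rewriting command that replaced two
# changesets would run
#
#     cleanupnodes(repo, {old1: [new1], old2: [new2]}, b'rebase')
#
# which moves bookmarks off the old nodes and then either writes obsolescence
# markers or strips the old nodes, depending on the repo configuration.

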
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


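# Hedged usage sketch (editorial note): consumers resolving copy sources per
# changeset, e.g. for log templates, could do:
#
#     copiesfn = getcopiesfn(repo)
#     for dst, src in copiesfn(repo[b'tip']):
#         repo.ui.write(b'%s copied from %s\n' % (dst, src))

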
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


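# A hedged sketch (editorial note): amend-style callers typically hold the
# wlock and wrap this in a dirstate.parentchange() block:
#
#     with repo.wlock(), repo.dirstate.parentchange():
#         movedirstate(repo, newctx)

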
def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)


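# For illustration (editorial note): the resulting .hg/requires file is just a
# sorted list of feature names, one per line, e.g.
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store

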
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used directly, as it has been
    set in the instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x


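# Hedged sketch (editorial note) of the subclassing pattern the docstring
# describes; localrepo defines comparable helpers. parsebookmarks is
# hypothetical:
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class repo(object):
#         def __init__(self):
#             self._filecache = {}  # required by the descriptor above
#
#         @repofilecache(b'bookmarks')
#         def _bookmarks(self):
#             return parsebookmarks(self)  # re-run only when the file changes

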
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data


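# Illustrative configuration (editorial note): with an hgrc section such as
#
#     [extdata]
#     bugzilla = shell:cat extdata.txt
#
# extdatasource(repo, b'bugzilla') maps each revision listed in the command's
# output to its freeform value; the extdata() revset is one consumer of this.

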
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            b'lock can only be inherited while held'
        )
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)


def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(
        repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
    )


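# Hedged usage sketch (editorial note; the child command is illustrative): a
# caller holding the wlock can let a child hg process reuse it through the
# HG_WLOCK_LOCKER environment variable:
#
#     with repo.wlock():
#         rc = wlocksub(repo, b'hg debuglocks')

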
class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))


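# Minimal usage sketch (editorial note): instances are normally obtained via
# ui.makeprogress(), which supplies the updatebar callback, and used as a
# context manager so complete() always runs:
#
#     with ui.makeprogress(b'scanning', unit=b'files', total=len(files)) as p:
#         for f in files:
#             p.increment(item=f)

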
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


1796 class simplekeyvaluefile(object):
1796 class simplekeyvaluefile(object):
1797 """A simple file with key=value lines
1797 """A simple file with key=value lines
1798
1798
1799 Keys must be alphanumerics and start with a letter, values must not
1799 Keys must be alphanumerics and start with a letter, values must not
1800 contain '\n' characters"""
1800 contain '\n' characters"""
1801
1801
1802 firstlinekey = b'__firstline'
1802 firstlinekey = b'__firstline'
1803
1803
1804 def __init__(self, vfs, path, keys=None):
1804 def __init__(self, vfs, path, keys=None):
1805 self.vfs = vfs
1805 self.vfs = vfs
1806 self.path = path
1806 self.path = path
1807
1807
1808 def read(self, firstlinenonkeyval=False):
1808 def read(self, firstlinenonkeyval=False):
1809 """Read the contents of a simple key-value file
1809 """Read the contents of a simple key-value file
1810
1810
1811 'firstlinenonkeyval' indicates whether the first line of file should
1811 'firstlinenonkeyval' indicates whether the first line of file should
1812 be treated as a key-value pair or reuturned fully under the
1812 be treated as a key-value pair or reuturned fully under the
1813 __firstline key."""
1813 __firstline key."""
1814 lines = self.vfs.readlines(self.path)
1814 lines = self.vfs.readlines(self.path)
1815 d = {}
1815 d = {}
1816 if firstlinenonkeyval:
1816 if firstlinenonkeyval:
1817 if not lines:
        if not lines:
            e = _(b"empty simplekeyvalue file")
            raise error.CorruptedState(e)
        # we don't want to include '\n' in the __firstline
        d[self.firstlinekey] = lines[0][:-1]
        del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines, which only contain '\n' and are therefore not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write a key=>value mapping to a file

        data is a dict. Keys must be alphanumeric and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as-is, not in key=value form."""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
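    # Illustrative round-trip (a sketch; the class and repo names here are
    # assumptions, not shown in this hunk):
    #
    #     kvfile = simplekeyvaluefile(repo.vfs, b'mystate')
    #     kvfile.write({b'version': b'1'})
    #     kvfile.read()   # -> {b'version': b'1'}
    #
    # When write() is given a 'firstline', read() exposes it under the
    # reserved b'__firstline' key instead of parsing it as key=value.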


_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]

def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()
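# Sketch of how an extension might register a prefetch function (hypothetical
# extension-side code): util.hooks objects expose add(source, hook), and every
# registered hook is later called with (repo, revs, match):
#
#     def _prefetch(repo, revs, match):
#         ...  # fetch the files selected by 'match' at 'revs' into local store
#
#     scmutil.fileprefetchhooks.add(b'myextension', _prefetch)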

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, the callbacks are registered as transaction
    validators instead.
    """
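    # Caller-side sketch (assumed usage, not part of this change): with
    # as_validator=True the callbacks fire while the transaction can still be
    # aborted, which is why the validator variants below use in-progress
    # wording ("adding ...", "will be published"):
    #
    #     tr = repo.transaction(b'unbundle')
    #     registersummarycallback(repo, tr, b'unbundle', as_validator=True)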

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than that of the actual
        # underlying repository, so the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
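    # Sample output (illustrative numbers): a pull adding three changesets
    # with one new head prints
    #
    #     added 3 changesets with 5 changes to 2 files (+1 heads)
    #
    # while the validator variant prints "adding 3 changesets ..." instead.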

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts
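            # The returned mapping is keyed by the user-facing instability
            # names, e.g. (illustrative values):
            #
            #     {b'orphan': 2, b'phase-divergent': 0, b'content-divergent': 0}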

            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of the new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search for new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally, but the term has not
                # been exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))
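        # Sample output (made-up hashes): a pull bringing in six revisions,
        # two of them drafts, might report
        #
        #     new changesets cb9a9f314b8b:ba677d51d2a8 (2 drafts)
        #     (2 other changesets obsolete on arrival)
        #
        # where the second line only appears when some incoming changesets
        # were already obsolete and thus stay invisible.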

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            phasetracking = tr.changes.get(b'phases', {})
            if not phasetracking:
                return
            published = [
                rev
                for rev, (old, new) in pycompat.iteritems(phasetracking)
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))
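        # For example, publishing two pre-existing draft changesets during a
        # pull prints "2 local changesets published", or, in validator mode,
        # "2 local changesets will be published" before the transaction is
        # committed.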


def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)
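# For instance, getinstabilitymessage(2, b'orphan') returns
# b'2 new orphan changesets\n'; a non-positive delta falls through and returns
# None, which callers treat as "nothing to warn about".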


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
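# Example (hypothetical input): with six nodes and the default maxnumnodes=4,
# a non-verbose ui gets the first four short hashes followed by
# b' and 2 others'; verbose mode always lists every node.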


def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip and repair
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to affected branches
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)
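# When the check fires it aborts the whole transaction, e.g. (made-up hashes):
#
#     abort: rejecting multiple heads on branch "default"
#     (2 heads: 6ef1c2fd5d29 f9a0bdc3e571)
#
# Registering this helper as a transaction validator (an assumption about the
# caller, not shown here) is what makes the rejection happen before anything
# is committed.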


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink
2104
2116
2105
2117
2106 def unhidehashlikerevs(repo, specs, hiddentype):
2118 def unhidehashlikerevs(repo, specs, hiddentype):
2107 """parse the user specs and unhide changesets whose hash or revision number
2119 """parse the user specs and unhide changesets whose hash or revision number
2108 is passed.
2120 is passed.
2109
2121
2110 hiddentype can be: 1) 'warn': warn while unhiding changesets
2122 hiddentype can be: 1) 'warn': warn while unhiding changesets
2111 2) 'nowarn': don't warn while unhiding changesets
2123 2) 'nowarn': don't warn while unhiding changesets
2112
2124
2113 returns a repo object with the required changesets unhidden
2125 returns a repo object with the required changesets unhidden
2114 """
2126 """
2115 if not repo.filtername or not repo.ui.configbool(
2127 if not repo.filtername or not repo.ui.configbool(
2116 b'experimental', b'directaccess'
2128 b'experimental', b'directaccess'
2117 ):
2129 ):
2118 return repo
2130 return repo
2119
2131
2120 if repo.filtername not in (b'visible', b'visible-hidden'):
2132 if repo.filtername not in (b'visible', b'visible-hidden'):
2121 return repo
2133 return repo
2122
2134
2123 symbols = set()
2135 symbols = set()
2124 for spec in specs:
2136 for spec in specs:
2125 try:
2137 try:
2126 tree = revsetlang.parse(spec)
2138 tree = revsetlang.parse(spec)
2127 except error.ParseError: # will be reported by scmutil.revrange()
2139 except error.ParseError: # will be reported by scmutil.revrange()
2128 continue
2140 continue
2129
2141
2130 symbols.update(revsetlang.gethashlikesymbols(tree))
2142 symbols.update(revsetlang.gethashlikesymbols(tree))
2131
2143
2132 if not symbols:
2144 if not symbols:
2133 return repo
2145 return repo
2134
2146
2135 revs = _getrevsfromsymbols(repo, symbols)
2147 revs = _getrevsfromsymbols(repo, symbols)
2136
2148
2137 if not revs:
2149 if not revs:
2138 return repo
2150 return repo
2139
2151
2140 if hiddentype == b'warn':
2152 if hiddentype == b'warn':
2141 unfi = repo.unfiltered()
2153 unfi = repo.unfiltered()
2142 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2154 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2143 repo.ui.warn(
2155 repo.ui.warn(
2144 _(
2156 _(
2145 b"warning: accessing hidden changesets for write "
2157 b"warning: accessing hidden changesets for write "
2146 b"operation: %s\n"
2158 b"operation: %s\n"
2147 )
2159 )
2148 % revstr
2160 % revstr
2149 )
2161 )
2150
2162
2151 # we have to use new filtername to separate branch/tags cache until we can
2163 # we have to use new filtername to separate branch/tags cache until we can
2152 # disbale these cache when revisions are dynamically pinned.
2164 # disbale these cache when revisions are dynamically pinned.
2153 return repo.filtered(b'visible-hidden', revs)
2165 return repo.filtered(b'visible-hidden', revs)
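# Typical call (illustrative spec): given a hidden hash prefix,
#
#     repo = unhidehashlikerevs(repo, [b'c90e683d'], b'warn')
#
# returns a 'visible-hidden' filtered repo with that changeset pinned and
# emits the "accessing hidden changesets" warning; b'nowarn' does the same
# silently.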


def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
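# Reading the revset above: start from everything reachable from 'mark', then
# drop what is also reachable from non-bookmarked heads or from other
# bookmarks; what remains are the changesets that belong to this bookmark's
# own line of development. E.g. (hypothetical history) with a 'feature'
# bookmark branched off 'default', bookmarkrevs(repo, b'feature') yields only
# the revisions unique to 'feature'.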