# Provenance: Mercurial core `scmutil.py`, as shown in changeset
# r44490:0750cbff (default branch) — "movedirstate: get copies from
# dirstate before setting parents", by Martin von Zweigbergk.
# (Original page header: "END OF EJS Templates"; diff hunk @@ -1,2200 +1,2200.)
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import posixpath
14 import posixpath
15 import re
15 import re
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 nullid,
23 nullid,
24 nullrev,
24 nullrev,
25 short,
25 short,
26 wdirid,
26 wdirid,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from .pycompat import getattr
29 from .pycompat import getattr
30 from .thirdparty import attr
30 from .thirdparty import attr
31 from . import (
31 from . import (
32 copies as copiesmod,
32 copies as copiesmod,
33 encoding,
33 encoding,
34 error,
34 error,
35 match as matchmod,
35 match as matchmod,
36 obsolete,
36 obsolete,
37 obsutil,
37 obsutil,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 policy,
40 policy,
41 pycompat,
41 pycompat,
42 revsetlang,
42 revsetlang,
43 similar,
43 similar,
44 smartset,
44 smartset,
45 url,
45 url,
46 util,
46 util,
47 vfs,
47 vfs,
48 )
48 )
49
49
50 from .utils import (
50 from .utils import (
51 procutil,
51 procutil,
52 stringutil,
52 stringutil,
53 )
53 )
54
54
55 if pycompat.iswindows:
55 if pycompat.iswindows:
56 from . import scmwindows as scmplatform
56 from . import scmwindows as scmplatform
57 else:
57 else:
58 from . import scmposix as scmplatform
58 from . import scmposix as scmplatform
59
59
60 parsers = policy.importmod('parsers')
60 parsers = policy.importmod('parsers')
61 rustrevlog = policy.importrust('revlog')
61 rustrevlog = policy.importrust('revlog')
62
62
63 termsize = scmplatform.termsize
63 termsize = scmplatform.termsize
64
64
65
65
@attr.s(slots=True, repr=False)
class status(object):
    '''Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    '''

    # Each attribute defaults to a fresh list per instance (attr.Factory
    # avoids the shared-mutable-default pitfall).
    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        # Yield the seven lists in a fixed order so callers can unpack:
        # modified, added, removed, deleted, unknown, ignored, clean.
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        # Hand-written repr (repr=False above) producing native strings
        # on both py2 and py3 via pycompat.sysstr.
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
96
96
97
97
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs for every subrepo mentioned in either
    context's .hgsub state.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    # Subpaths only present in ctx2 are handled separately below.
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
122
122
123
123
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count excluded changesets that are secret (and not extinct) so the
    # message can explain why nothing was exchanged.
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))
142
142
143
143
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except:  # re-raises
            # Log the traceback (if enabled) before the typed handlers run.
            ui.traceback()
            raise
        # Global exception handling, alphabetically
        # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        # Not an error per se: the user must resolve something; exit code 1.
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, b"code"):  # HTTPError
            ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, b"reason"):  # URLError or SSLError
            try:  # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
        elif (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            # Broken pipe (e.g. output piped to `head`) is silently ignored.
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code

    return -1
268
268
269
269
def checknewlabel(repo, lbl, kind):
    """Validate a new label (bookmark/branch/etc) name, aborting if invalid.

    Rejects reserved names, revset-special characters, pure integers
    (which would be ambiguous with revnums), and surrounding whitespace.
    """
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.Abort(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.Abort(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        # int() succeeded, so the label is a pure integer -> ambiguous.
        raise error.Abort(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
287
287
288
288
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # Newlines and carriage returns would break internal file formats.
    if b'\r' in f or b'\n' in f:
        raise error.Abort(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )
296
296
297
297
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        # Only pay for the Windows-portability check when it matters.
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_(b"warning: %s\n") % msg)
309
309
310
310
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) boolean pair derived from ui.portablefilenames.
    '''
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    # On Windows, non-portable names always abort regardless of config.
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn
324
324
325
325
class casecollisionauditor(object):
    """Warn or abort on filenames that differ only in case from tracked files.

    Call the instance with each new filename; collisions against the
    (case-folded) dirstate contents are reported via ui or error.Abort.
    """

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # Join-then-split is a fast way to case-fold all tracked names at once.
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # A collision is a case-folded match that is not the exact same
        # (already tracked) path.
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
349
349
350
350
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.

    Returns None when nothing is filtered at or below maxrev.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    # Sort so the digest is deterministic for a given filtered set.
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update(b'%d;' % rev)
        key = s.digest()
    return key
374
374
375
375
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        # Only propagate errors on the root path itself; ignore errors on
        # entries encountered while walking.
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            # Record dirname's stat; return True only if it was not already
            # seen (guards against symlink cycles).
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        # Without samestat we cannot detect cycles, so disable symlinks.
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Walk through the symlink explicitly, sharing
                        # seen_dirs to avoid revisiting directories.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
423
423
424
424
def binnode(ctx):
    """Return binary node id for a given basectx

    The working directory (node None) maps to the sentinel wdirid.
    """
    node = ctx.node()
    if node is None:
        return wdirid
    return node
431
431
432
432
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation

    The working directory (rev None) maps to the sentinel wdirrev.
    """
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev
440
440
441
441
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
447
447
448
448
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity

    Full hex node in debug mode, short form otherwise.
    """
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))
456
456
457
457
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly abbreviated) hex node id prefix to a binary node.

    Returns None when no node matches. May raise
    error.AmbiguousPrefixLookupError when the prefix is ambiguous and cannot
    be disambiguated via the configured revset.
    """
    if prefix.startswith(b'x') and repo.ui.configbool(
        b'experimental', b'revisions.prefixhexnode'
    ):
        # A leading 'x' explicitly marks "this is a hex prefix, not a revnum".
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous/
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # Only a unique match within the revset disambiguates.
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node
490
490
491
491
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        # Not numeric at all, so it cannot be a revnum.
        return False
505
505
506
506
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    # A prefix of length 0 would never be unambiguous.
    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            # In this mode, a prefix that could also be a revision number is
            # made unambiguous by prepending an 'x' marker instead of being
            # lengthened.
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        # Default mode: extend the prefix until it can no longer be mistaken
        # for a revision number.
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        # The user configured a subset of revisions to disambiguate within;
        # the computed revset is cached across calls when possible.
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapped need to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                # Fast path: the C nodetree computes the shortest prefix
                # unique within `revs` directly.
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # Slow path (pure Python): grow the prefix until exactly one
            # revision in `revs` matches it.
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        # No disambiguation revset (or node outside it): ask the changelog
        # for the globally shortest unique prefix.
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
579
579
580
580
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        # The symbol did not resolve to any revision.
        return False
    else:
        return True
592
592
593
593
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        # Well-known symbolic names are handled directly by the repo.
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        # Next, try to interpret the symbol as a revision number (possibly
        # negative, counting from the end).
        try:
            r = int(symbol)
            # Reject forms like b'010' or b'+5' that round-trip differently.
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            # Out-of-range revnums are invalid, except the special
            # working-directory revision.
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            # Must propagate so the outer handler can build a "filtered"
            # error message.
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # A full 40-char hex string may be a complete node id.
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # Finally, treat the symbol as a (possibly short) hex node prefix.
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        # The symbol referred to the working directory; return its context.
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        # Translate low-level "filtered" errors into a user-facing message.
        raise _filterederror(repo, symbol)
659
659
660
660
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):
        # Look the changeset up in the unfiltered repo so we can tell
        # whether it is hidden because it is obsolete.
        ctx = revsymbol(repo.unfiltered(), changeid)

        if ctx.obsolete():
            # If the changeset is obsolete, enrich the message with the
            # reason that made this changeset not visible.
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)

    # Any other filter: report which subset excluded the revision.
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
685
685
686
686
def revsingle(repo, revspec, default=b'.', localalias=None):
    """Resolve a single revision spec, falling back to `default` when empty.

    The integer 0 is a valid spec and is not treated as "empty".
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_(b'empty revision set'))
    return repo[matched.last()]
695
695
696
696
def _pairspec(revspec):
    """Return whether `revspec` parses to a top-level range expression."""
    rangeops = (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in rangeops
705
705
706
706
def revpair(repo, revs):
    """Resolve `revs` to a pair of contexts (first, second).

    With no revs at all, the pair is ('.', working directory). When the
    resolved set collapses to a single revision and the user did not
    explicitly write a range, the second element is the working directory.
    """
    if not revs:
        return repo[b'.'], repo[None]

    resolved = revrange(repo, revs)

    if not resolved:
        raise error.Abort(_(b'empty revision range'))

    first, second = resolved.first(), resolved.last()

    # If several specs were given but they collapsed to one revision because
    # some side of a range was empty, that is an error.
    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
731
731
732
732
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Bare integers are wrapped as revnum specs; everything else is assumed
    # to be an already-formatted revset string.
    allspecs = [
        revsetlang.formatspec(b'%d', spec) if isinstance(spec, int) else spec
        for spec in specs
    ]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
760
760
761
761
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        # A merge: both parents always matter.
        return ps
    if repo.ui.debugflag:
        # Debug output shows the (null) second parent explicitly.
        return [ps[0], repo[nullrev]]
    if ps[0].rev() >= intrev(ctx) - 1:
        # Sole parent is the immediately preceding revision: elide it.
        return []
    return ps
777
777
778
778
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        # Bind cwd/pathto once so the returned closure is cheap to call.
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        # Repo-relative with forward slashes: paths pass through unchanged.
        return lambda f: f
    return util.localpath
814
814
815
815
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''

    def relativized(f):
        # Prefix the repo-relative path with the subrepo path before
        # delegating to the original uipathfn.
        return uipathfn(posixpath.join(subpath, f))

    return relativized
819
819
820
820
def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    if pats:
        return True
    return bool(opts.get(b'include') or opts.get(b'exclude'))
828
828
829
829
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # Only bare (kind-less) patterns are treated as shell globs.
            try:
                matches = glob.glob(pat)
            except re.error:
                # Malformed pattern: keep it verbatim.
                matches = [pat]
            if matches:
                expanded.extend(matches)
                continue
        # Kinded patterns, and globs with no matches, pass through as-is.
        expanded.append(kindpat)
    return expanded
848
848
849
849
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        # Expand shell globs (Windows only, see expandpats) before matching.
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        # Default "bad file" callback: warn on the repo's ui with a
        # user-presentable path.
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    # If the matcher matches everything, report no patterns so callers can
    # distinguish "match all" from an explicit pattern list.
    if m.always():
        pats = []
    return m, pats
881
881
882
882
def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    # Same as matchandpats(), but callers only want the matcher itself.
    m, _discarded = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
888
888
889
889
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # `repo` is unused but kept so the signature parallels the other
    # match* helpers.
    return matchmod.always()
893
893
894
894
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # `repo` is unused but kept for signature symmetry with match()/matchall().
    return matchmod.exact(files, badfn=badfn)
898
898
899
899
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # A plain path (no pattern kind): just canonicalize it.
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # A real pattern must match exactly one file in the given revision.
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
913
913
914
914
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if origbackuppath:
        return vfs.vfs(repo.wvfs.join(origbackuppath))
    return None
923
923
924
924
def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # No dedicated backup directory configured: backup next to the file.
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                # Only the deepest conflicting ancestor needs removing;
                # makedirs below recreates the whole chain.
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        # A directory sits where the backup file must go: clear it out.
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
959
959
960
960
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        # Bind the node->rev translator and the rev container's membership
        # test once, so each __contains__ call is just two lookups.
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        # Translate the node to a rev, then test membership in the container.
        return self._revcontains(self._torev(node))
970
970
971
971
972 def cleanupnodes(
972 def cleanupnodes(
973 repo,
973 repo,
974 replacements,
974 replacements,
975 operation,
975 operation,
976 moves=None,
976 moves=None,
977 metadata=None,
977 metadata=None,
978 fixphase=False,
978 fixphase=False,
979 targetphase=None,
979 targetphase=None,
980 backup=True,
980 backup=True,
981 ):
981 ):
982 """do common cleanups when old nodes are replaced by new nodes
982 """do common cleanups when old nodes are replaced by new nodes
983
983
984 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
984 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
985 (we might also want to move working directory parent in the future)
985 (we might also want to move working directory parent in the future)
986
986
987 By default, bookmark moves are calculated automatically from 'replacements',
987 By default, bookmark moves are calculated automatically from 'replacements',
988 but 'moves' can be used to override that. Also, 'moves' may include
988 but 'moves' can be used to override that. Also, 'moves' may include
989 additional bookmark moves that should not have associated obsmarkers.
989 additional bookmark moves that should not have associated obsmarkers.
990
990
991 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
991 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
992 have replacements. operation is a string, like "rebase".
992 have replacements. operation is a string, like "rebase".
993
993
994 metadata is dictionary containing metadata to be stored in obsmarker if
994 metadata is dictionary containing metadata to be stored in obsmarker if
995 obsolescence is enabled.
995 obsolescence is enabled.
996 """
996 """
997 assert fixphase or targetphase is None
997 assert fixphase or targetphase is None
998 if not replacements and not moves:
998 if not replacements and not moves:
999 return
999 return
1000
1000
1001 # translate mapping's other forms
1001 # translate mapping's other forms
1002 if not util.safehasattr(replacements, b'items'):
1002 if not util.safehasattr(replacements, b'items'):
1003 replacements = {(n,): () for n in replacements}
1003 replacements = {(n,): () for n in replacements}
1004 else:
1004 else:
1005 # upgrading non tuple "source" to tuple ones for BC
1005 # upgrading non tuple "source" to tuple ones for BC
1006 repls = {}
1006 repls = {}
1007 for key, value in replacements.items():
1007 for key, value in replacements.items():
1008 if not isinstance(key, tuple):
1008 if not isinstance(key, tuple):
1009 key = (key,)
1009 key = (key,)
1010 repls[key] = value
1010 repls[key] = value
1011 replacements = repls
1011 replacements = repls
1012
1012
1013 # Unfiltered repo is needed since nodes in replacements might be hidden.
1013 # Unfiltered repo is needed since nodes in replacements might be hidden.
1014 unfi = repo.unfiltered()
1014 unfi = repo.unfiltered()
1015
1015
1016 # Calculate bookmark movements
1016 # Calculate bookmark movements
1017 if moves is None:
1017 if moves is None:
1018 moves = {}
1018 moves = {}
1019 for oldnodes, newnodes in replacements.items():
1019 for oldnodes, newnodes in replacements.items():
1020 for oldnode in oldnodes:
1020 for oldnode in oldnodes:
1021 if oldnode in moves:
1021 if oldnode in moves:
1022 continue
1022 continue
1023 if len(newnodes) > 1:
1023 if len(newnodes) > 1:
1024 # usually a split, take the one with biggest rev number
1024 # usually a split, take the one with biggest rev number
1025 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1025 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1026 elif len(newnodes) == 0:
1026 elif len(newnodes) == 0:
1027 # move bookmark backwards
1027 # move bookmark backwards
1028 allreplaced = []
1028 allreplaced = []
1029 for rep in replacements:
1029 for rep in replacements:
1030 allreplaced.extend(rep)
1030 allreplaced.extend(rep)
1031 roots = list(
1031 roots = list(
1032 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1032 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1033 )
1033 )
1034 if roots:
1034 if roots:
1035 newnode = roots[0].node()
1035 newnode = roots[0].node()
1036 else:
1036 else:
1037 newnode = nullid
1037 newnode = nullid
1038 else:
1038 else:
1039 newnode = newnodes[0]
1039 newnode = newnodes[0]
1040 moves[oldnode] = newnode
1040 moves[oldnode] = newnode
1041
1041
1042 allnewnodes = [n for ns in replacements.values() for n in ns]
1042 allnewnodes = [n for ns in replacements.values() for n in ns]
1043 toretract = {}
1043 toretract = {}
1044 toadvance = {}
1044 toadvance = {}
1045 if fixphase:
1045 if fixphase:
1046 precursors = {}
1046 precursors = {}
1047 for oldnodes, newnodes in replacements.items():
1047 for oldnodes, newnodes in replacements.items():
1048 for oldnode in oldnodes:
1048 for oldnode in oldnodes:
1049 for newnode in newnodes:
1049 for newnode in newnodes:
1050 precursors.setdefault(newnode, []).append(oldnode)
1050 precursors.setdefault(newnode, []).append(oldnode)
1051
1051
1052 allnewnodes.sort(key=lambda n: unfi[n].rev())
1052 allnewnodes.sort(key=lambda n: unfi[n].rev())
1053 newphases = {}
1053 newphases = {}
1054
1054
1055 def phase(ctx):
1055 def phase(ctx):
1056 return newphases.get(ctx.node(), ctx.phase())
1056 return newphases.get(ctx.node(), ctx.phase())
1057
1057
1058 for newnode in allnewnodes:
1058 for newnode in allnewnodes:
1059 ctx = unfi[newnode]
1059 ctx = unfi[newnode]
1060 parentphase = max(phase(p) for p in ctx.parents())
1060 parentphase = max(phase(p) for p in ctx.parents())
1061 if targetphase is None:
1061 if targetphase is None:
1062 oldphase = max(
1062 oldphase = max(
1063 unfi[oldnode].phase() for oldnode in precursors[newnode]
1063 unfi[oldnode].phase() for oldnode in precursors[newnode]
1064 )
1064 )
1065 newphase = max(oldphase, parentphase)
1065 newphase = max(oldphase, parentphase)
1066 else:
1066 else:
1067 newphase = max(targetphase, parentphase)
1067 newphase = max(targetphase, parentphase)
1068 newphases[newnode] = newphase
1068 newphases[newnode] = newphase
1069 if newphase > ctx.phase():
1069 if newphase > ctx.phase():
1070 toretract.setdefault(newphase, []).append(newnode)
1070 toretract.setdefault(newphase, []).append(newnode)
1071 elif newphase < ctx.phase():
1071 elif newphase < ctx.phase():
1072 toadvance.setdefault(newphase, []).append(newnode)
1072 toadvance.setdefault(newphase, []).append(newnode)
1073
1073
1074 with repo.transaction(b'cleanup') as tr:
1074 with repo.transaction(b'cleanup') as tr:
1075 # Move bookmarks
1075 # Move bookmarks
1076 bmarks = repo._bookmarks
1076 bmarks = repo._bookmarks
1077 bmarkchanges = []
1077 bmarkchanges = []
1078 for oldnode, newnode in moves.items():
1078 for oldnode, newnode in moves.items():
1079 oldbmarks = repo.nodebookmarks(oldnode)
1079 oldbmarks = repo.nodebookmarks(oldnode)
1080 if not oldbmarks:
1080 if not oldbmarks:
1081 continue
1081 continue
1082 from . import bookmarks # avoid import cycle
1082 from . import bookmarks # avoid import cycle
1083
1083
1084 repo.ui.debug(
1084 repo.ui.debug(
1085 b'moving bookmarks %r from %s to %s\n'
1085 b'moving bookmarks %r from %s to %s\n'
1086 % (
1086 % (
1087 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1087 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1088 hex(oldnode),
1088 hex(oldnode),
1089 hex(newnode),
1089 hex(newnode),
1090 )
1090 )
1091 )
1091 )
1092 # Delete divergent bookmarks being parents of related newnodes
1092 # Delete divergent bookmarks being parents of related newnodes
1093 deleterevs = repo.revs(
1093 deleterevs = repo.revs(
1094 b'parents(roots(%ln & (::%n))) - parents(%n)',
1094 b'parents(roots(%ln & (::%n))) - parents(%n)',
1095 allnewnodes,
1095 allnewnodes,
1096 newnode,
1096 newnode,
1097 oldnode,
1097 oldnode,
1098 )
1098 )
1099 deletenodes = _containsnode(repo, deleterevs)
1099 deletenodes = _containsnode(repo, deleterevs)
1100 for name in oldbmarks:
1100 for name in oldbmarks:
1101 bmarkchanges.append((name, newnode))
1101 bmarkchanges.append((name, newnode))
1102 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1102 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1103 bmarkchanges.append((b, None))
1103 bmarkchanges.append((b, None))
1104
1104
1105 if bmarkchanges:
1105 if bmarkchanges:
1106 bmarks.applychanges(repo, tr, bmarkchanges)
1106 bmarks.applychanges(repo, tr, bmarkchanges)
1107
1107
1108 for phase, nodes in toretract.items():
1108 for phase, nodes in toretract.items():
1109 phases.retractboundary(repo, tr, phase, nodes)
1109 phases.retractboundary(repo, tr, phase, nodes)
1110 for phase, nodes in toadvance.items():
1110 for phase, nodes in toadvance.items():
1111 phases.advanceboundary(repo, tr, phase, nodes)
1111 phases.advanceboundary(repo, tr, phase, nodes)
1112
1112
1113 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1113 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1114 # Obsolete or strip nodes
1114 # Obsolete or strip nodes
1115 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1115 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1116 # If a node is already obsoleted, and we want to obsolete it
1116 # If a node is already obsoleted, and we want to obsolete it
1117 # without a successor, skip that obssolete request since it's
1117 # without a successor, skip that obssolete request since it's
1118 # unnecessary. That's the "if s or not isobs(n)" check below.
1118 # unnecessary. That's the "if s or not isobs(n)" check below.
1119 # Also sort the node in topology order, that might be useful for
1119 # Also sort the node in topology order, that might be useful for
1120 # some obsstore logic.
1120 # some obsstore logic.
1121 # NOTE: the sorting might belong to createmarkers.
1121 # NOTE: the sorting might belong to createmarkers.
1122 torev = unfi.changelog.rev
1122 torev = unfi.changelog.rev
1123 sortfunc = lambda ns: torev(ns[0][0])
1123 sortfunc = lambda ns: torev(ns[0][0])
1124 rels = []
1124 rels = []
1125 for ns, s in sorted(replacements.items(), key=sortfunc):
1125 for ns, s in sorted(replacements.items(), key=sortfunc):
1126 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1126 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1127 rels.append(rel)
1127 rels.append(rel)
1128 if rels:
1128 if rels:
1129 obsolete.createmarkers(
1129 obsolete.createmarkers(
1130 repo, rels, operation=operation, metadata=metadata
1130 repo, rels, operation=operation, metadata=metadata
1131 )
1131 )
1132 elif phases.supportinternal(repo) and mayusearchived:
1132 elif phases.supportinternal(repo) and mayusearchived:
1133 # this assume we do not have "unstable" nodes above the cleaned ones
1133 # this assume we do not have "unstable" nodes above the cleaned ones
1134 allreplaced = set()
1134 allreplaced = set()
1135 for ns in replacements.keys():
1135 for ns in replacements.keys():
1136 allreplaced.update(ns)
1136 allreplaced.update(ns)
1137 if backup:
1137 if backup:
1138 from . import repair # avoid import cycle
1138 from . import repair # avoid import cycle
1139
1139
1140 node = min(allreplaced, key=repo.changelog.rev)
1140 node = min(allreplaced, key=repo.changelog.rev)
1141 repair.backupbundle(
1141 repair.backupbundle(
1142 repo, allreplaced, allreplaced, node, operation
1142 repo, allreplaced, allreplaced, node, operation
1143 )
1143 )
1144 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1144 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1145 else:
1145 else:
1146 from . import repair # avoid import cycle
1146 from . import repair # avoid import cycle
1147
1147
1148 tostrip = list(n for ns in replacements for n in ns)
1148 tostrip = list(n for ns in replacements for n in ns)
1149 if tostrip:
1149 if tostrip:
1150 repair.delayedstrip(
1150 repair.delayedstrip(
1151 repo.ui, repo, tostrip, operation, backup=backup
1151 repo.ui, repo, tostrip, operation, backup=backup
1152 )
1152 )
1153
1153
1154
1154
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    """Add new files and forget missing files, like ``hg addremove``.

    Walks the working copy with ``matcher``, schedules unknown files for
    addition and on-disk-missing tracked files for removal, and records
    renames between the two groups when a similarity threshold is given
    in ``opts``.  Recurses into subrepositories when requested.

    Returns 1 if any explicitly named file was rejected or a
    subrepository reported a failure, otherwise 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    # the UI exposes a percentage; similar.findrenames wants a ratio in [0, 1]
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        # only complain about files the user named explicitly; everything
        # that was rejected is remembered so we can set the exit code below
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1222
1222
1223
1223
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # report what is about to be added/removed
        unknownset = set(unknown + forgotten)
        for abs in sorted(unknownset | set(deleted)):
            if abs in unknownset:
                repo.ui.status(_(b'adding %s\n') % abs)
            else:
                repo.ui.status(_(b'removing %s\n') % abs)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
1257
1257
1258
1258
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists of paths:
    ``(added, unknown, deleted, removed, forgotten)``.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    # walkresults maps path -> stat result (falsy when the file is missing
    # on disk).  The dirstate char states used below: '?' untracked,
    # 'r' removed, 'a' added.
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            # untracked file present on disk (and at a legal path)
            unknown.append(abs)
        elif dstate != b'r' and not st:
            # tracked but missing from disk
            deleted.append(abs)
        elif dstate == b'r' and st:
            # marked removed but still present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1293
1293
1294
1294
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        bothexact = matcher.exact(old) and matcher.exact(new)
        if repo.ui.verbose or not bothexact:
            repo.ui.status(
                _(
                    b'recording removal of %s as rename to %s '
                    b'(%d%% similar)\n'
                )
                % (uipathfn(old), uipathfn(new), score * 100)
            )
        renames[new] = old
    return renames
1316
1316
1317
1317
def _markchanges(repo, unknown, deleted, renames):
    '''Record pending changes: files in ``unknown`` become added, files in
    ``deleted`` become removed, and each ``dst -> src`` entry in ``renames``
    is recorded as a copy.'''
    workingctx = repo[None]
    with repo.wlock():
        workingctx.forget(deleted)
        workingctx.add(unknown)
        for dst, src in pycompat.iteritems(renames):
            workingctx.copy(src, dst)
1327
1327
1328
1328
def getrenamedfn(repo, endrev=None):
    """Return a function ``getrenamed(fn, rev)`` that returns the copy
    source of ``fn`` in revision ``rev``, or None if it was not copied.

    ``endrev`` bounds how far the per-file rename cache is populated when
    the filelog-based (non changeset-centric) algorithm is used.
    """
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            # copy metadata lives on the changeset: consult the copy dicts
            # of both parents
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    # rcache: fn -> {linkrev: copy source, or False if not renamed}
    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
1373
1373
1374
1374
def getcopiesfn(repo, endrev=None):
    """Return a function mapping a changectx to its sorted (dst, src)
    copy pairs."""
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
            if not p2copies:
                return sorted(p1copies.items())
            allcopies = p1copies.copy()
            # There should be no overlap
            allcopies.update(p2copies)
            return sorted(allcopies.items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            rev = ctx.rev()
            pairs = ((fn, getrenamed(fn, rev)) for fn in ctx.files())
            return [(fn, src) for fn, src in pairs if src]

    return copiesfn
1399
1399
1400
1400
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain: if src is itself a recorded copy,
    # attribute dst to the original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        # dirstate chars: 'm' merged, 'n' normal, 'a' added, 'r' removed,
        # '?' untracked
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            # source was only added, never committed: there is no revision
            # to record copy metadata against, so warn and just add dst
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1423
1423
1424
1424
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    # Read the working directory's copy records BEFORE changing the
    # parents: setparents() can discard copy information, so reading
    # afterwards would lose copies made in the working directory.
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # resolve chained copies (dst <- src <- original) down to the original
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
1469
1469
1470
1470
def writerequires(opener, requirements):
    """Atomically write ``requirements`` (sorted, one per line) to the
    ``requires`` file via ``opener``."""
    lines = [b"%s\n" % req for req in sorted(requirements)]
    with opener(b'requires', b'w', atomictemp=True) as fp:
        fp.write(b''.join(lines))
1475
1475
1476
1476
class filecachesubentry(object):
    """Tracks a single file path together with its cached stat data."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # unknown cacheability is optimistically treated as cacheable
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        # returns None when the file does not exist
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1531
1531
1532
1532
class filecacheentry(object):
    """Aggregates filecachesubentry objects for a group of paths."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1549
1549
1550
1550
1551 class filecache(object):
1551 class filecache(object):
1552 """A property like decorator that tracks files under .hg/ for updates.
1552 """A property like decorator that tracks files under .hg/ for updates.
1553
1553
1554 On first access, the files defined as arguments are stat()ed and the
1554 On first access, the files defined as arguments are stat()ed and the
1555 results cached. The decorated function is called. The results are stashed
1555 results cached. The decorated function is called. The results are stashed
1556 away in a ``_filecache`` dict on the object whose method is decorated.
1556 away in a ``_filecache`` dict on the object whose method is decorated.
1557
1557
1558 On subsequent access, the cached result is used as it is set to the
1558 On subsequent access, the cached result is used as it is set to the
1559 instance dictionary.
1559 instance dictionary.
1560
1560
1561 On external property set/delete operations, the caller must update the
1561 On external property set/delete operations, the caller must update the
1562 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1562 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1563 instead of directly setting <attr>.
1563 instead of directly setting <attr>.
1564
1564
1565 When using the property API, the cached data is always used if available.
1565 When using the property API, the cached data is always used if available.
1566 No stat() is performed to check if the file has changed.
1566 No stat() is performed to check if the file has changed.
1567
1567
1568 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1568 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1569 can populate an entry before the property's getter is called. In this case,
1569 can populate an entry before the property's getter is called. In this case,
1570 entries in ``_filecache`` will be used during property operations,
1570 entries in ``_filecache`` will be used during property operations,
1571 if available. If the underlying file changes, it is up to external callers
1571 if available. If the underlying file changes, it is up to external callers
1572 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1572 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1573 method result as well as possibly calling ``del obj._filecache[attr]`` to
1573 method result as well as possibly calling ``del obj._filecache[attr]`` to
1574 remove the ``filecacheentry``.
1574 remove the ``filecacheentry``.
1575 """
1575 """
1576
1576
    def __init__(self, *paths):
        # paths of the files to watch; resolved to runtime paths via join()
        self.paths = paths
1579
1579
    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        # abstract on purpose: the base class cannot know how 'obj' maps
        # relative names to on-disk paths
        raise NotImplementedError
1588
1588
1589 def __call__(self, func):
1589 def __call__(self, func):
1590 self.func = func
1590 self.func = func
1591 self.sname = func.__name__
1591 self.sname = func.__name__
1592 self.name = pycompat.sysbytes(self.sname)
1592 self.name = pycompat.sysbytes(self.sname)
1593 return self
1593 return self
1594
1594
    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        # once computed, the value is stored in obj.__dict__, so this
        # non-data descriptor is no longer consulted for this attribute
        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # backing file changed on disk: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj
1619
1619
1620 # don't implement __set__(), which would make __dict__ lookup as slow as
1620 # don't implement __set__(), which would make __dict__ lookup as slow as
1621 # function call.
1621 # function call.
1622
1622
1623 def set(self, obj, value):
1623 def set(self, obj, value):
1624 if self.name not in obj._filecache:
1624 if self.name not in obj._filecache:
1625 # we add an entry for the missing value because X in __dict__
1625 # we add an entry for the missing value because X in __dict__
1626 # implies X in _filecache
1626 # implies X in _filecache
1627 paths = [self.join(obj, path) for path in self.paths]
1627 paths = [self.join(obj, path) for path in self.paths]
1628 ce = filecacheentry(paths, False)
1628 ce = filecacheentry(paths, False)
1629 obj._filecache[self.name] = ce
1629 obj._filecache[self.name] = ce
1630 else:
1630 else:
1631 ce = obj._filecache[self.name]
1631 ce = obj._filecache[self.name]
1632
1632
1633 ce.obj = value # update cached copy
1633 ce.obj = value # update cached copy
1634 obj.__dict__[self.sname] = value # update copy returned by obj.x
1634 obj.__dict__[self.sname] = value # update copy returned by obj.x
1635
1635
1636
1636
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # each record is b'<revspec>[ <value>]\n'
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        # always reap the child process and close the stream, even when a
        # record fails to parse mid-iteration
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
1703
1703
1704
1704
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run ``cmd`` via ui.system() while letting it inherit ``lock``.

    The inheritance token from ``lock.inherit()`` is exposed to the child
    through the ``envvar`` environment variable. ``lock`` must currently be
    held, otherwise LockInheritanceContractViolation is raised.

    NOTE: when a caller passes ``environ``, that dict is mutated in place
    (the token is added to it) rather than copied.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            b'lock can only be inherited while held'
        )
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1715
1715
1716
1716
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, b'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1726
1726
1727
1727
class progress(object):
    """Progress reporting helper.

    Forwards every position change to the ``updatebar`` callable as
    ``(topic, pos, item, unit, total)`` and, when the ``progress.debug``
    config knob is set, mirrors it to ``ui.debug()``. Usable as a context
    manager: leaving the ``with`` block calls complete().
    """

    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0  # current position; None once complete() has run
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        """Move the bar to ``pos``, optionally updating ``total``."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        """Advance the bar by ``step`` positions."""
        self.update(self.pos + step, item, total)

    def complete(self):
        """Signal that the operation is done and clear the bar."""
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        # BUGFIX: 'unit' must be initialized even when self.unit is empty;
        # previously an empty unit (the constructor default) raised
        # UnboundLocalError below whenever progress.debug was enabled.
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1776
1776
1777
1777
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool(b'format', b'generaldelta'):
        return True
    return ui.configbool(b'format', b'usegeneraldelta')
1785
1785
1786
1786
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool(b'format', b'generaldelta')
    return enabled
1792
1792
1793
1793
class simplekeyvaluefile(object):
    """A small storage file made of ``key=value`` lines.

    Keys must be alphanumeric and start with a letter; values must not
    contain newline characters."""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Parse the file and return its contents as a dict.

        When ``firstlinenonkeyval`` is true, the first line is not parsed
        as a key-value pair; it is returned verbatim under the
        ``__firstline`` key instead."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # drop the trailing '\n' from the stored first line
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # 'line.strip()' filters lines consisting solely of '\n',
            # which a bare 'if line' would let through
            parsed = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in parsed:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Serialize ``data`` (a dict) as key=value lines.

        Keys must be alphanumeric and start with a letter; values must not
        contain newline characters. When ``firstline`` is not None it is
        written first, verbatim, not in key=value form."""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                raise error.ProgrammingError(
                    b"key name '%s' is reserved" % self.firstlinekey
                )
            if not k[0:1].isalpha():
                raise error.ProgrammingError(
                    b"keys must start with a letter in a key-value file"
                )
            if not k.isalnum():
                raise error.ProgrammingError(
                    b"invalid key name in a simple key-value file"
                )
            if b'\n' in v:
                raise error.ProgrammingError(
                    b"invalid value in a simple key-value file"
                )
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
1864
1864
1865
1865
# Transaction name prefixes for which registersummarycallback() reports
# obsolescence-marker statistics after the transaction closes.
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]
1873
1873
# Transaction name prefixes for which registersummarycallback() reports
# incoming changesets and phase changes after the transaction closes.
_reportnewcssource = [
    b'pull',
    b'unbundle',
]
1878
1878
1879
1879
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1892
1892
1893
1893
# hooks run by prefetchfiles(); each registered callable receives
# (repo, revs, match)
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1899
1899
1900
1900
def registersummarycallback(repo, otr, txnname=b''):
    """register a callback to issue a summary after the transaction is closed

    The changegroup report is always registered; obsolescence and
    new-changeset reports are only added when ``txnname`` matches the
    module-level ``_reportobsoletedsource`` / ``_reportnewcssource`` lists.
    """

    def txmatch(sources):
        # does the transaction name start with any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    # ordered category names under which the callbacks are registered
    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        # category names sort in registration order ('00-', '01-', ...)
        newcat = b'%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                repo.ui.status(_(b'obsoleted %i changesets\n') % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            # snapshot taken now, compared against after the transaction
            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            phasetracking = tr.changes.get(b'phases', {})
            if not phasetracking:
                return
            published = [
                rev
                for rev, (old, new) in pycompat.iteritems(phasetracking)
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(
                _(b'%d local changesets published\n') % len(published)
            )
2061
2061
2062
2062
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extension can wrap to show more
    information like how to fix instabilities"""
    if delta <= 0:
        # no new instabilities of this kind: nothing to warn about
        return None
    return _(b'%i new %s changesets\n') % (delta, instability)
2070
2070
2071
2071
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line textual summary of ``nodes``.

    All short hashes are listed when there are at most ``maxnumnodes``
    entries or when the ui is verbose; otherwise the tail is elided."""
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    shown = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (shown, len(nodes) - maxnumnodes)
2077
2077
2078
2078
def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """check that no named branch has multiple heads"""
    # skip the logic during strip
    if desc in (b'strip', b'repair'):
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to affected branch
    branches = visible.branchmap()
    for name in branches:
        heads = branches.branchheads(name, closed=accountclosed)
        if len(heads) <= 1:
            continue
        msg = _(b'rejecting multiple heads on branch "%s"') % name
        hint = _(b'%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
2095
2095
2096
2096
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # Identity by default; extensions monkeypatch/wrap this hook.
    return sink
2102
2102
2103
2103
def unhidehashlikerevs(repo, specs, hiddentype):
    """Parse the user specs and unhide changesets whose hash or revision
    number is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    Returns a repo object with the required changesets unhidden.
    """
    # Direct access only applies to filtered repos with the feature enabled
    # (keep the config lookup after the filtername test, as the original
    # short-circuit did).
    if not repo.filtername:
        return repo
    if not repo.ui.configbool(b'experimental', b'directaccess'):
        return repo
    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    # Collect every hash-like symbol mentioned by the user-supplied revsets.
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)
    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join(pycompat.bytestr(unfi[r]) for r in revs)
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)
2152
2152
2153
2153
def _getrevsfromsymbols(repo, symbols):
    """Parse the list of symbols and return the set of revision numbers of
    hidden changesets present in *symbols*.

    A symbol is treated first as a possible revision number (only when the
    'experimental.directaccess.revnums' config allows it), then as a hex
    node-id prefix.
    """
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for symbol in symbols:
        # Numeric interpretation first.
        try:
            num = int(symbol)
        except ValueError:
            num = None
        if num is not None and num <= tiprev:
            if not allowrevnums:
                # Numbers are disallowed; never fall back to hash lookup
                # for something that parses as an in-range revnum.
                continue
            if num not in cl:
                revs.add(num)
                continue
            # num is visible already: fall through to hash interpretation,
            # matching the original control flow.

        # Hash-prefix interpretation.
        try:
            node = resolvehexnodeidprefix(unfi, symbol)
        except (error.LookupError, error.WdirUnsupported):
            node = None
        if node is not None:
            rev = unficl.rev(node)
            if rev not in cl:
                revs.add(rev)

    return revs
2187
2187
2188
2188
def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark.

    Excludes ancestors of other heads and of other bookmarks so that only
    the changesets "belonging" to *mark* are returned.
    """
    expr = (
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))"
    )
    return repo.revs(expr, mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now