scmutil: introduce filterrequirements() to split reqs into wc and store ones...
Pulkit Goyal
r46054:9a99ab82 default
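The hunk below shows only part of the change: the `requirements` module is imported as `requirementsmod` so that scmutil can consult it when splitting a repository's requirements between the working copy and the store. As a rough sketch of the helper the commit message describes (the constant name and exact control flow here are assumptions, not the committed implementation):

    def filterrequirements(requirements):
        # Sketch: split requirements into those describing the working copy
        # (to be written to .hg/requires) and those describing the store
        # (to be written to .hg/store/requires); returns (wcreq, storereq).
        wcreq, storereq = set(), set()
        for r in requirements:
            # WORKING_DIR_REQUIREMENTS is assumed to be a set of
            # working-copy-specific requirements in the requirements module.
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wcreq.add(r)
            else:
                storereq.add(r)
        return wcreq, storereq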
@@ -1,2230 +1,2254 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    requirements as requirementsmod,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    '''Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    '''

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
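
# Hypothetical usage note: a status object unpacks in the fixed order yielded
# by __iter__ above, e.g.:
#   modified, added, removed, deleted, unknown, ignored, clean = repo.status()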


def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, b"code"):  # HTTPError
            ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, b"reason"):  # URLError or SSLError
            try:  # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
        elif (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code

    return -1
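
# Hypothetical usage: a command dispatcher can wrap its entry point so that
# any stray exception is turned into an exit code, e.g.:
#   ret = callcatch(ui, lambda: _runcommand(ui, options, cmd, d))
# where _runcommand stands in for whatever callable should run under this
# handler.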


def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.Abort(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.Abort(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.Abort(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)


def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.Abort(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashutil.sha1()
        for rev in revs:
            s.update(b'%d;' % rev)
        key = s.digest()
    return key


def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False
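
# For example, with 1000 revisions in the repo, b'42' may be rev 42 (True),
# while b'042' starts with a zero and b'2000' is beyond tip, so neither can
# be confused with a revnum (False).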


def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its
                        # internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
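
# Note: the b'x' marker added by disambiguate() round-trips through
# resolvehexnodeidprefix() above, which strips a leading b'x'. E.g. a shortest
# prefix b'123' that could also be read as rev 123 comes back as b'x123' when
# experimental.revisions.prefixhexnode is enabled.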


def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
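
# Hypothetical usage, expanding an argument before taking the union, as the
# docstring above advises:
#   spec = revsetlang.formatspec(b'branch(%s)', branchname)
#   revs = revrange(repo, [spec, b'draft()'])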


def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath
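
# For example, with ui.relative-paths=yes and the cwd in a subdirectory of
# the repo, the returned function maps a repo-relative b'a/b' to a path
# relative to that cwd; with ui.relative-paths=no it returns the
# repo-relative path, via util.localpath unless ui.slash is set.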
815
816
816
817
817 def subdiruipathfn(subpath, uipathfn):
818 def subdiruipathfn(subpath, uipathfn):
818 '''Create a new uipathfn that treats the file as relative to subpath.'''
819 '''Create a new uipathfn that treats the file as relative to subpath.'''
819 return lambda f: uipathfn(posixpath.join(subpath, f))
820 return lambda f: uipathfn(posixpath.join(subpath, f))
820
821
821
822
822 def anypats(pats, opts):
823 def anypats(pats, opts):
823 '''Checks if any patterns, including --include and --exclude were given.
824 '''Checks if any patterns, including --include and --exclude were given.
824
825
825 Some commands (e.g. addremove) use this condition for deciding whether to
826 Some commands (e.g. addremove) use this condition for deciding whether to
826 print absolute or relative paths.
827 print absolute or relative paths.
827 '''
828 '''
828 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
829 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
829
830
830
831
831 def expandpats(pats):
832 def expandpats(pats):
832 '''Expand bare globs when running on windows.
833 '''Expand bare globs when running on windows.
833 On posix we assume it already has already been done by sh.'''
834 On posix we assume it already has already been done by sh.'''
834 if not util.expandglobs:
835 if not util.expandglobs:
835 return list(pats)
836 return list(pats)
836 ret = []
837 ret = []
837 for kindpat in pats:
838 for kindpat in pats:
838 kind, pat = matchmod._patsplit(kindpat, None)
839 kind, pat = matchmod._patsplit(kindpat, None)
839 if kind is None:
840 if kind is None:
840 try:
841 try:
841 globbed = glob.glob(pat)
842 globbed = glob.glob(pat)
842 except re.error:
843 except re.error:
843 globbed = [pat]
844 globbed = [pat]
844 if globbed:
845 if globbed:
845 ret.extend(globbed)
846 ret.extend(globbed)
846 continue
847 continue
847 ret.append(kindpat)
848 ret.append(kindpat)
848 return ret
849 return ret
849
850
850
851
851 def matchandpats(
852 def matchandpats(
852 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
853 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
853 ):
854 ):
854 '''Return a matcher and the patterns that were used.
855 '''Return a matcher and the patterns that were used.
855 The matcher will warn about bad matches, unless an alternate badfn callback
856 The matcher will warn about bad matches, unless an alternate badfn callback
856 is provided.'''
857 is provided.'''
857 if opts is None:
858 if opts is None:
858 opts = {}
859 opts = {}
859 if not globbed and default == b'relpath':
860 if not globbed and default == b'relpath':
860 pats = expandpats(pats or [])
861 pats = expandpats(pats or [])
861
862
862 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
863 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
863
864
864 def bad(f, msg):
865 def bad(f, msg):
865 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
866 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
866
867
867 if badfn is None:
868 if badfn is None:
868 badfn = bad
869 badfn = bad
869
870
870 m = ctx.match(
871 m = ctx.match(
871 pats,
872 pats,
872 opts.get(b'include'),
873 opts.get(b'include'),
873 opts.get(b'exclude'),
874 opts.get(b'exclude'),
874 default,
875 default,
875 listsubrepos=opts.get(b'subrepos'),
876 listsubrepos=opts.get(b'subrepos'),
876 badfn=badfn,
877 badfn=badfn,
877 )
878 )
878
879
879 if m.always():
880 if m.always():
880 pats = []
881 pats = []
881 return m, pats
882 return m, pats
882
883
883
884
884 def match(
885 def match(
885 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
886 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
886 ):
887 ):
887 '''Return a matcher that will warn about bad matches.'''
888 '''Return a matcher that will warn about bad matches.'''
888 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
889 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
889
890

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
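

# Illustrative sketch (not part of the original module): how the
# 'ui.origbackuppath' setting changes what backuppath() returns. The ui and
# repo objects here are placeholders supplied by the caller.
def _backuppath_example(ui, repo):
    # With no [ui] origbackuppath configured, backups sit next to the file:
    #   <repo>/foo/bar.txt -> <repo>/foo/bar.txt.orig
    # With "[ui] origbackuppath = .hg/origbackups", they are collected under
    # that directory instead:
    #   <repo>/.hg/origbackups/foo/bar.txt
    return backuppath(ui, repo, b'foo/bar.txt')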


class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from
    'replacements', but 'moves' can be used to override that. Also, 'moves' may
    include additional bookmark moves that should not have associated
    obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non-tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with the biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, which might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned
            # ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )
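

# Illustrative sketch (not part of the original module): the two shapes of
# 'replacements' that cleanupnodes() accepts. The node values are placeholders
# supplied by the caller.
def _cleanupnodes_example(repo, old, new1, new2, dropped):
    # mapping form: each old node (or tuple of old nodes) maps to its
    # successors; an empty successor list means "obsolete without successor"
    cleanupnodes(repo, {old: [new1, new2], dropped: []}, b'example')
    # bare-iterable form: nodes that have no replacements at all
    cleanupnodes(repo, [dropped], b'example')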


def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn
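

# Illustrative sketch (not part of the original module): getcopiesfn() returns
# a callable usable per changectx; the revision used here is a placeholder.
def _getcopiesfn_example(repo):
    copiesfn = getcopiesfn(repo)
    # yields a sorted list of (destination, source) pairs for that changeset
    return copiesfn(repo[b'tip'])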


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def filterrequirements(requirements):
    """ filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if False:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None
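
# Illustrative sketch (not part of the original module): what the split will
# look like once it is switched on. The 'if False:' guard above keeps the
# store side disabled for now, so today every requirement still lands in
# .hg/requires. The requirement names below are placeholders.
def _filterrequirements_example():
    wcreq, storereq = filterrequirements({b'revlogv1', b'store'})
    # split disabled: wcreq is the full set, storereq is None
    assert storereq is None
    return wcreq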


def writereporequirements(repo, requirements=None):
    """ writes requirements for the repo to .hg/requires and, once splitting
    is enabled, store requirements to .hg/store/requires """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)
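

# Illustrative sketch (not part of the original module): the on-disk format
# produced by writerequires() is one requirement name per line, sorted. The
# scratch directory below is a placeholder; real callers pass repo.vfs or
# repo.svfs.
def _writerequires_example():
    scratch = vfs.vfs(b'/tmp/requires-demo')
    writerequires(scratch, {b'store', b'revlogv1', b'fncache'})
    # /tmp/requires-demo/requires now reads:
    #   fncache
    #   revlogv1
    #   store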


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
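

# Illustrative sketch (not part of the original module): a minimal filecache
# subclass wiring join() to a vfs, in the spirit of how repository classes use
# this decorator. The class and property names here are placeholders.
class _vfsfilecache(filecache):
    def join(self, obj, fname):
        return obj.vfs.join(fname)


class _filecacheowner(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}  # required by the filecache protocol

    @_vfsfilecache(b'bookmarks')
    def bookmarks(self):
        # recomputed only when the tracked file changes on disk
        return self.vfs.tryread(b'bookmarks')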


def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
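

# Illustrative sketch (not part of the original module): reading an extdata
# source. The section name 'extdata' and the record format come from the
# docstring above; the 'ticket' source name and its command are placeholders:
#
#   [extdata]
#   ticket = shell: cat .hg/ticketmap
#
# where .hg/ticketmap holds "<revision> <value>" lines.
def _extdatasource_example(repo):
    # returns {rev: value} for every record whose revision exists locally
    return extdatasource(repo, b'ticket')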


def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            b'lock can only be inherited while held'
        )
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)


def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(
        repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
    )


class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
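

# Illustrative sketch (not part of the original module): driving the progress
# helper as a context manager, which guarantees complete() runs on exit.
# Callers normally obtain an instance via ui.makeprogress() rather than
# constructing one directly; 'items' is a placeholder list of byte strings.
def _progress_example(ui, items):
    with ui.makeprogress(b'examples', unit=b'items', total=len(items)) as p:
        for item in items:
            p.increment(item=item)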
1786
1810
1787
1811
1788 def gdinitconfig(ui):
1812 def gdinitconfig(ui):
1789 """helper function to know if a repo should be created as general delta
1813 """helper function to know if a repo should be created as general delta
1790 """
1814 """
1791 # experimental config: format.generaldelta
1815 # experimental config: format.generaldelta
1792 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1816 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1793 b'format', b'usegeneraldelta'
1817 b'format', b'usegeneraldelta'
1794 )
1818 )
1795
1819
1796
1820
1797 def gddeltaconfig(ui):
1821 def gddeltaconfig(ui):
1798 """helper function to know if incoming delta should be optimised
1822 """helper function to know if incoming delta should be optimised
1799 """
1823 """
1800 # experimental config: format.generaldelta
1824 # experimental config: format.generaldelta
1801 return ui.configbool(b'format', b'generaldelta')
1825 return ui.configbool(b'format', b'generaldelta')
1802
1826
1803
1827
1804 class simplekeyvaluefile(object):
1828 class simplekeyvaluefile(object):
1805 """A simple file with key=value lines
1829 """A simple file with key=value lines
1806
1830
1807 Keys must be alphanumerics and start with a letter, values must not
1831 Keys must be alphanumerics and start with a letter, values must not
1808 contain '\n' characters"""
1832 contain '\n' characters"""
1809
1833
1810 firstlinekey = b'__firstline'
1834 firstlinekey = b'__firstline'
1811
1835
1812 def __init__(self, vfs, path, keys=None):
1836 def __init__(self, vfs, path, keys=None):
1813 self.vfs = vfs
1837 self.vfs = vfs
1814 self.path = path
1838 self.path = path
1815
1839
1816 def read(self, firstlinenonkeyval=False):
1840 def read(self, firstlinenonkeyval=False):
1817 """Read the contents of a simple key-value file
1841 """Read the contents of a simple key-value file
1818
1842
1819 'firstlinenonkeyval' indicates whether the first line of file should
1843 'firstlinenonkeyval' indicates whether the first line of file should
1820 be treated as a key-value pair or reuturned fully under the
1844 be treated as a key-value pair or reuturned fully under the
1821 __firstline key."""
1845 __firstline key."""
1822 lines = self.vfs.readlines(self.path)
1846 lines = self.vfs.readlines(self.path)
1823 d = {}
1847 d = {}
1824 if firstlinenonkeyval:
1848 if firstlinenonkeyval:
1825 if not lines:
1849 if not lines:
1826 e = _(b"empty simplekeyvalue file")
1850 e = _(b"empty simplekeyvalue file")
1827 raise error.CorruptedState(e)
1851 raise error.CorruptedState(e)
1828 # we don't want to include '\n' in the __firstline
1852 # we don't want to include '\n' in the __firstline
1829 d[self.firstlinekey] = lines[0][:-1]
1853 d[self.firstlinekey] = lines[0][:-1]
1830 del lines[0]
1854 del lines[0]
1831
1855
1832 try:
1856 try:
1833 # the 'if line.strip()' part prevents us from failing on empty
1857 # the 'if line.strip()' part prevents us from failing on empty
1834 # lines which only contain '\n' therefore are not skipped
1858 # lines which only contain '\n' therefore are not skipped
1835 # by 'if line'
1859 # by 'if line'
1836 updatedict = dict(
1860 updatedict = dict(
1837 line[:-1].split(b'=', 1) for line in lines if line.strip()
1861 line[:-1].split(b'=', 1) for line in lines if line.strip()
1838 )
1862 )
1839 if self.firstlinekey in updatedict:
1863 if self.firstlinekey in updatedict:
1840 e = _(b"%r can't be used as a key")
1864 e = _(b"%r can't be used as a key")
1841 raise error.CorruptedState(e % self.firstlinekey)
1865 raise error.CorruptedState(e % self.firstlinekey)
1842 d.update(updatedict)
1866 d.update(updatedict)
1843 except ValueError as e:
1867 except ValueError as e:
1844 raise error.CorruptedState(stringutil.forcebytestr(e))
1868 raise error.CorruptedState(stringutil.forcebytestr(e))
1845 return d
1869 return d

    def write(self, data, firstline=None):
        """Write a key=>value mapping to the file

        data is a dict. Keys must be alphanumeric and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as-is, not in key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
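

# A minimal usage sketch for simplekeyvaluefile; the vfs instance and paths
# below are assumptions for illustration, not part of this module:
#
#     from mercurial import vfs as vfsmod
#
#     statevfs = vfsmod.vfs(b'/tmp/demo')  # any existing writable directory
#     kvfile = simplekeyvaluefile(statevfs, b'state')
#     kvfile.write({b'version': b'1'}, firstline=b'v1')
#     data = kvfile.read(firstlinenonkeyval=True)
#     assert data[b'__firstline'] == b'v1'
#     assert data[b'version'] == b'1'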


# transaction names (prefixes) for which a summary of obsoleted changesets
# is reported
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

# transaction names (prefixes) for which newly added changesets are reported
_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)
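

# Hedged example of a caller prefetching all files of one revision ('rev' is
# an assumed revision number, not defined here); a None matcher matches all
# files, per the docstring above:
#
#     prefetchfiles(repo, [(rev, None)])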


# a list of (repo, revmatches) prefetch functions
fileprefetchhooks = util.hooks()
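
# A hedged sketch of how an extension might register a prefetch hook; the
# hook name and body are illustrative assumptions:
#
#     def myprefetch(repo, revmatches):
#         for rev, match in revmatches:
#             pass  # fetch the files selected by 'match' at revision 'rev'
#
#     fileprefetchhooks.add(b'myextension', myprefetch)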

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead.
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

    if obsolete.isenabled(
        repo, obsolete.createmarkersopt
    ) and repo.ui.configbool(
        b'experimental', b'evolution.report-instabilities'
    ):
        instabilitytypes = [
            (b'orphan', b'orphan'),
            (b'phase-divergent', b'phasedivergent'),
            (b'content-divergent', b'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(
                    set(obsolete.getrevs(repo, revset)) - filtered
                )
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)

        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (
                    newinstabilitycounts[instability]
                    - oldinstabilitycounts[instability]
                )
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally but the term has not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))
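

# Hedged usage sketch for registersummarycallback; the surrounding
# transaction setup is an assumption for illustration:
#
#     with repo.transaction(b'pull') as tr:
#         registersummarycallback(repo, tr, txnname=b'pull')
#         # ... apply the incoming changes; summaries print when tr closes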


def getinstabilitymessage(delta, instability):
    """return the message to use when warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)
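
# For example, getinstabilitymessage(2, b'orphan') returns
# b'2 new orphan changesets\n', while a non-positive delta returns None.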


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
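
# For example, with the default maxnumnodes=4 and six nodes, nodesummaries()
# yields the first four short hashes followed by b'and 2 others', unless the
# ui is verbose, in which case all six short hashes are shown.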


def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to the affected branch
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)
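
# Hedged usage sketch; how callers wire this into a transaction is an
# assumption for illustration:
#
#     enforcesinglehead(repo, tr, b'commit', accountclosed=False)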


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)
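

# Hedged usage sketch ('1a2b3c' is an assumed hash prefix):
#
#     repo = unhidehashlikerevs(repo, [b'1a2b3c'], b'warn')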


def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
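
# Hedged usage sketch ('feature' is an assumed bookmark name):
#
#     revs = bookmarkrevs(repo, b'feature')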