scmutil: improve documentation of writereporequirements()...
Pulkit Goyal
r46859:cb12658b default
@@ -1,2322 +1,2327 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    requirements as requirementsmod,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)


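# Editor's note: the following example is an illustrative sketch and not
# part of Mercurial's scmutil.py. Because attrs generates __iter__ above,
# a ``status`` struct unpacks positionally into its seven file lists.
def _example_status_unpacking():
    st = status(modified=[b'a.txt'], added=[b'b.txt'])
    modified, added, removed, deleted, unknown, ignored, clean = st
    assert modified == [b'a.txt'] and clean == []
    return st

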
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        detailed_exit_code = 100
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        detailed_exit_code = 50
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        detailed_exit_code = 240
        coarse_exit_code = 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        if isinstance(inst, (error.InputError, error.ParseError)):
            detailed_exit_code = 10
        elif isinstance(inst, error.StateError):
            detailed_exit_code = 20
        elif isinstance(inst, error.ConfigError):
            detailed_exit_code = 30
        elif isinstance(inst, error.SecurityError):
            detailed_exit_code = 150
        elif isinstance(inst, error.CanceledError):
            detailed_exit_code = 250
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code


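# Editor's note: an illustrative sketch, not part of scmutil.py. With the
# ui.detailed-exit-code config enabled, callcatch() maps exception classes
# to the detailed codes above, e.g. error.InputError -> 10.
def _example_callcatch(ui):
    def boom():
        raise error.InputError(b'bad argument')

    # Prints "abort: bad argument" and returns 10 when detailed exit
    # codes are enabled in the given ui.
    return callcatch(ui, boom)

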
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.InputError(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )


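# Editor's note: an illustrative sketch, not part of scmutil.py. Reserved
# names, control characters, pure integers and surrounding whitespace are
# all rejected with error.InputError.
def _example_checknewlabel(repo):
    for bad in (b'tip', b'my:mark', b'42', b' padded '):
        try:
            checknewlabel(repo, bad, b'bookmark')
        except error.InputError:
            pass  # expected for every name above
    checknewlabel(repo, b'feature-x', b'bookmark')  # accepted

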
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.InputError(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = cl._filteredrevs_hashcache.get(maxrev)
    if not key:
        revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            key = s.digest()
        cl._filteredrevs_hashcache[maxrev] = key
    return key


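# Editor's note: an illustrative sketch, not part of scmutil.py. It
# recomputes the same cache key as filteredhash(): a SHA-1 over the
# ascending filtered revision numbers, each encoded as b'%d;'.
def _example_filteredhash_key(filteredrevs, maxrev):
    s = hashutil.sha1()
    for rev in sorted(r for r in filteredrevs if r <= maxrev):
        s.update(b'%d;' % rev)
    return s.digest()

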
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


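# Editor's note: an illustrative sketch, not part of scmutil.py. It lists
# every repository root found under a bytes path, following symlinks and
# descending into nested working directories.
def _example_walkrepos(path):
    return list(walkrepos(path, followsym=True, recurse=True))

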
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


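# Editor's note: an illustrative sketch, not part of scmutil.py. At normal
# verbosity the null revision formats as b'-1:000000000000' (the 12-hex
# short form of the all-zero node id); --debug uses the full 40-hex hash.
def _example_formatrevnode(ui):
    return formatrevnode(ui, nullrev, nullid)

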
def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False


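# Editor's note: an illustrative sketch, not part of scmutil.py. A leading
# zero (other than b'0' itself) or a value past tip can only be a hash
# prefix; plain b'0' remains ambiguous in any non-empty repository.
def _example_mayberevnum(repo):
    assert not mayberevnum(repo, b'00')    # leading zero: hash prefix only
    assert not mayberevnum(repo, b'beef')  # not an integer at all
    return mayberevnum(repo, b'0')         # True when the repo is non-empty

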
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


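# Editor's note: an illustrative sketch, not part of scmutil.py. With
# experimental.revisions.prefixhexnode enabled, a shortest prefix that
# could be mistaken for a revnum is marked with a leading 'x', which
# resolvehexnodeidprefix() strips again on lookup, so for an unfiltered
# node the round trip holds.
def _example_shortest_roundtrip(repo, node):
    prefix = shortesthexnodeidprefix(repo, node)
    return resolvehexnodeidprefix(repo, prefix) == node

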
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


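# Editor's note: an illustrative sketch, not part of scmutil.py. An empty
# revspec falls back to ``default``, i.e. the working directory parent.
def _example_revsingle(repo):
    return revsingle(repo, b'') == repo[b'.']

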
def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)


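# Editor's note: an illustrative sketch, not part of scmutil.py. Integer
# specs pass through directly; string specs with arguments should be
# pre-expanded with revsetlang.formatspec() as the docstring advises.
def _example_revrange(repo):
    spec = revsetlang.formatspec(b'ancestors(%d)', 0)
    return revrange(repo, [0, spec])  # union of rev 0 and its ancestors

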
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2


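# Editor's note: an illustrative sketch, not part of scmutil.py. Windows
# double from 8 until the 512 cap, after which the generator yields 512
# forever: [8, 16, 32, 64, 128, 256, 512, 512, ...].
def _example_increasingwindows():
    gen = increasingwindows()
    return [next(gen) for _ in range(8)]

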
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()


def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
856 """Return a function that produced paths for presenting to the user.
856 """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    """Expand bare globs when running on windows.
    On posix we assume it has already been done by sh."""
912 if not util.expandglobs:
912 if not util.expandglobs:
913 return list(pats)
913 return list(pats)
914 ret = []
914 ret = []
915 for kindpat in pats:
915 for kindpat in pats:
916 kind, pat = matchmod._patsplit(kindpat, None)
916 kind, pat = matchmod._patsplit(kindpat, None)
917 if kind is None:
917 if kind is None:
918 try:
918 try:
919 globbed = glob.glob(pat)
919 globbed = glob.glob(pat)
920 except re.error:
920 except re.error:
921 globbed = [pat]
921 globbed = [pat]
922 if globbed:
922 if globbed:
923 ret.extend(globbed)
923 ret.extend(globbed)
924 continue
924 continue
925 ret.append(kindpat)
925 ret.append(kindpat)
926 return ret
926 return ret
927
927
928
928
929 def matchandpats(
929 def matchandpats(
930 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
930 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
931 ):
931 ):
932 """Return a matcher and the patterns that were used.
932 """Return a matcher and the patterns that were used.
933 The matcher will warn about bad matches, unless an alternate badfn callback
933 The matcher will warn about bad matches, unless an alternate badfn callback
934 is provided."""
934 is provided."""
935 if opts is None:
935 if opts is None:
936 opts = {}
936 opts = {}
937 if not globbed and default == b'relpath':
937 if not globbed and default == b'relpath':
938 pats = expandpats(pats or [])
938 pats = expandpats(pats or [])
939
939
940 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
940 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
941
941
942 def bad(f, msg):
942 def bad(f, msg):
943 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
943 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
944
944
945 if badfn is None:
945 if badfn is None:
946 badfn = bad
946 badfn = bad
947
947
948 m = ctx.match(
948 m = ctx.match(
949 pats,
949 pats,
950 opts.get(b'include'),
950 opts.get(b'include'),
951 opts.get(b'exclude'),
951 opts.get(b'exclude'),
952 default,
952 default,
953 listsubrepos=opts.get(b'subrepos'),
953 listsubrepos=opts.get(b'subrepos'),
954 badfn=badfn,
954 badfn=badfn,
955 )
955 )
956
956
957 if m.always():
957 if m.always():
958 pats = []
958 pats = []
959 return m, pats
959 return m, pats
960
960
961
961
962 def match(
962 def match(
963 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
963 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
964 ):
964 ):
965 '''Return a matcher that will warn about bad matches.'''
965 '''Return a matcher that will warn about bad matches.'''
966 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
966 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
967
967
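# Editorial sketch (not part of scmutil.py, assuming `repo` is an open
# localrepository): build a matcher against the working copy and test paths.
# Matchers are callable, so a path can be checked directly.
#
#     m = match(repo[None], pats=[b'glob:src/**'],
#               opts={b'exclude': [b'path:src/vendor']})
#     m(b'src/main.py')      # -> True
#     m(b'src/vendor/x.py')  # -> False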
968
968
969 def matchall(repo):
969 def matchall(repo):
970 '''Return a matcher that will efficiently match everything.'''
970 '''Return a matcher that will efficiently match everything.'''
971 return matchmod.always()
971 return matchmod.always()
972
972
973
973
974 def matchfiles(repo, files, badfn=None):
974 def matchfiles(repo, files, badfn=None):
975 '''Return a matcher that will efficiently match exactly these files.'''
975 '''Return a matcher that will efficiently match exactly these files.'''
976 return matchmod.exact(files, badfn=badfn)
976 return matchmod.exact(files, badfn=badfn)
977
977
978
978
979 def parsefollowlinespattern(repo, rev, pat, msg):
979 def parsefollowlinespattern(repo, rev, pat, msg):
980 """Return a file name from `pat` pattern suitable for usage in followlines
980 """Return a file name from `pat` pattern suitable for usage in followlines
981 logic.
981 logic.
982 """
982 """
983 if not matchmod.patkind(pat):
983 if not matchmod.patkind(pat):
984 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
984 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
985 else:
985 else:
986 ctx = repo[rev]
986 ctx = repo[rev]
987 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
987 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
988 files = [f for f in ctx if m(f)]
988 files = [f for f in ctx if m(f)]
989 if len(files) != 1:
989 if len(files) != 1:
990 raise error.ParseError(msg)
990 raise error.ParseError(msg)
991 return files[0]
991 return files[0]
992
992
993
993
994 def getorigvfs(ui, repo):
994 def getorigvfs(ui, repo):
995 """return a vfs suitable for saving 'orig' files
995 """return a vfs suitable for saving 'orig' files
996
996
997 return None if no special directory is configured"""
997 return None if no special directory is configured"""
998 origbackuppath = ui.config(b'ui', b'origbackuppath')
998 origbackuppath = ui.config(b'ui', b'origbackuppath')
999 if not origbackuppath:
999 if not origbackuppath:
1000 return None
1000 return None
1001 return vfs.vfs(repo.wvfs.join(origbackuppath))
1001 return vfs.vfs(repo.wvfs.join(origbackuppath))
1002
1002
1003
1003
1004 def backuppath(ui, repo, filepath):
1004 def backuppath(ui, repo, filepath):
1005 """customize where working copy backup files (.orig files) are created
1005 """customize where working copy backup files (.orig files) are created
1006
1006
1007 Fetch user defined path from config file: [ui] origbackuppath = <path>
1007 Fetch user defined path from config file: [ui] origbackuppath = <path>
1008 Fall back to default (filepath with .orig suffix) if not specified
1008 Fall back to default (filepath with .orig suffix) if not specified
1009
1009
1010 filepath is repo-relative
1010 filepath is repo-relative
1011
1011
1012 Returns an absolute path
1012 Returns an absolute path
1013 """
1013 """
1014 origvfs = getorigvfs(ui, repo)
1014 origvfs = getorigvfs(ui, repo)
1015 if origvfs is None:
1015 if origvfs is None:
1016 return repo.wjoin(filepath + b".orig")
1016 return repo.wjoin(filepath + b".orig")
1017
1017
1018 origbackupdir = origvfs.dirname(filepath)
1018 origbackupdir = origvfs.dirname(filepath)
1019 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
1019 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
1020 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
1020 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
1021
1021
1022 # Remove any files that conflict with the backup file's path
1022 # Remove any files that conflict with the backup file's path
1023 for f in reversed(list(pathutil.finddirs(filepath))):
1023 for f in reversed(list(pathutil.finddirs(filepath))):
1024 if origvfs.isfileorlink(f):
1024 if origvfs.isfileorlink(f):
1025 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
1025 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
1026 origvfs.unlink(f)
1026 origvfs.unlink(f)
1027 break
1027 break
1028
1028
1029 origvfs.makedirs(origbackupdir)
1029 origvfs.makedirs(origbackupdir)
1030
1030
1031 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
1031 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
1032 ui.note(
1032 ui.note(
1033 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
1033 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
1034 )
1034 )
1035 origvfs.rmtree(filepath, forcibly=True)
1035 origvfs.rmtree(filepath, forcibly=True)
1036
1036
1037 return origvfs.join(filepath)
1037 return origvfs.join(filepath)
1038
1038
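# Editorial sketch (not part of scmutil.py): with the configuration
#
#     [ui]
#     origbackuppath = .hg/origbackups
#
# backuppath(ui, repo, b'dir/file.txt') returns
# <repo root>/.hg/origbackups/dir/file.txt instead of the default
# <repo root>/dir/file.txt.orig.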
1039
1039
1040 class _containsnode(object):
1040 class _containsnode(object):
1041 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1041 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1042
1042
1043 def __init__(self, repo, revcontainer):
1043 def __init__(self, repo, revcontainer):
1044 self._torev = repo.changelog.rev
1044 self._torev = repo.changelog.rev
1045 self._revcontains = revcontainer.__contains__
1045 self._revcontains = revcontainer.__contains__
1046
1046
1047 def __contains__(self, node):
1047 def __contains__(self, node):
1048 return self._revcontains(self._torev(node))
1048 return self._revcontains(self._torev(node))
1049
1049
1050
1050
1051 def cleanupnodes(
1051 def cleanupnodes(
1052 repo,
1052 repo,
1053 replacements,
1053 replacements,
1054 operation,
1054 operation,
1055 moves=None,
1055 moves=None,
1056 metadata=None,
1056 metadata=None,
1057 fixphase=False,
1057 fixphase=False,
1058 targetphase=None,
1058 targetphase=None,
1059 backup=True,
1059 backup=True,
1060 ):
1060 ):
1061 """do common cleanups when old nodes are replaced by new nodes
1061 """do common cleanups when old nodes are replaced by new nodes
1062
1062
1063 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1063 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1064 (we might also want to move working directory parent in the future)
1064 (we might also want to move working directory parent in the future)
1065
1065
1066 By default, bookmark moves are calculated automatically from 'replacements',
1066 By default, bookmark moves are calculated automatically from 'replacements',
1067 but 'moves' can be used to override that. Also, 'moves' may include
1067 but 'moves' can be used to override that. Also, 'moves' may include
1068 additional bookmark moves that should not have associated obsmarkers.
1068 additional bookmark moves that should not have associated obsmarkers.
1069
1069
1070 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
1070 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
1071 have replacements. operation is a string, like "rebase".
1071 have replacements. operation is a string, like "rebase".
1072
1072
1073 metadata is a dictionary containing metadata to be stored in obsmarkers if
1073 metadata is a dictionary containing metadata to be stored in obsmarkers if
1074 obsolescence is enabled.
1074 obsolescence is enabled.
1075 """
1075 """
1076 assert fixphase or targetphase is None
1076 assert fixphase or targetphase is None
1077 if not replacements and not moves:
1077 if not replacements and not moves:
1078 return
1078 return
1079
1079
1080 # translate mapping's other forms
1080 # translate mapping's other forms
1081 if not util.safehasattr(replacements, b'items'):
1081 if not util.safehasattr(replacements, b'items'):
1082 replacements = {(n,): () for n in replacements}
1082 replacements = {(n,): () for n in replacements}
1083 else:
1083 else:
1084 # upgrading non tuple "source" to tuple ones for BC
1084 # upgrading non tuple "source" to tuple ones for BC
1085 repls = {}
1085 repls = {}
1086 for key, value in replacements.items():
1086 for key, value in replacements.items():
1087 if not isinstance(key, tuple):
1087 if not isinstance(key, tuple):
1088 key = (key,)
1088 key = (key,)
1089 repls[key] = value
1089 repls[key] = value
1090 replacements = repls
1090 replacements = repls
1091
1091
1092 # Unfiltered repo is needed since nodes in replacements might be hidden.
1092 # Unfiltered repo is needed since nodes in replacements might be hidden.
1093 unfi = repo.unfiltered()
1093 unfi = repo.unfiltered()
1094
1094
1095 # Calculate bookmark movements
1095 # Calculate bookmark movements
1096 if moves is None:
1096 if moves is None:
1097 moves = {}
1097 moves = {}
1098 for oldnodes, newnodes in replacements.items():
1098 for oldnodes, newnodes in replacements.items():
1099 for oldnode in oldnodes:
1099 for oldnode in oldnodes:
1100 if oldnode in moves:
1100 if oldnode in moves:
1101 continue
1101 continue
1102 if len(newnodes) > 1:
1102 if len(newnodes) > 1:
1103 # usually a split, take the one with biggest rev number
1103 # usually a split, take the one with biggest rev number
1104 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1104 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1105 elif len(newnodes) == 0:
1105 elif len(newnodes) == 0:
1106 # move bookmark backwards
1106 # move bookmark backwards
1107 allreplaced = []
1107 allreplaced = []
1108 for rep in replacements:
1108 for rep in replacements:
1109 allreplaced.extend(rep)
1109 allreplaced.extend(rep)
1110 roots = list(
1110 roots = list(
1111 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1111 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1112 )
1112 )
1113 if roots:
1113 if roots:
1114 newnode = roots[0].node()
1114 newnode = roots[0].node()
1115 else:
1115 else:
1116 newnode = nullid
1116 newnode = nullid
1117 else:
1117 else:
1118 newnode = newnodes[0]
1118 newnode = newnodes[0]
1119 moves[oldnode] = newnode
1119 moves[oldnode] = newnode
1120
1120
1121 allnewnodes = [n for ns in replacements.values() for n in ns]
1121 allnewnodes = [n for ns in replacements.values() for n in ns]
1122 toretract = {}
1122 toretract = {}
1123 toadvance = {}
1123 toadvance = {}
1124 if fixphase:
1124 if fixphase:
1125 precursors = {}
1125 precursors = {}
1126 for oldnodes, newnodes in replacements.items():
1126 for oldnodes, newnodes in replacements.items():
1127 for oldnode in oldnodes:
1127 for oldnode in oldnodes:
1128 for newnode in newnodes:
1128 for newnode in newnodes:
1129 precursors.setdefault(newnode, []).append(oldnode)
1129 precursors.setdefault(newnode, []).append(oldnode)
1130
1130
1131 allnewnodes.sort(key=lambda n: unfi[n].rev())
1131 allnewnodes.sort(key=lambda n: unfi[n].rev())
1132 newphases = {}
1132 newphases = {}
1133
1133
1134 def phase(ctx):
1134 def phase(ctx):
1135 return newphases.get(ctx.node(), ctx.phase())
1135 return newphases.get(ctx.node(), ctx.phase())
1136
1136
1137 for newnode in allnewnodes:
1137 for newnode in allnewnodes:
1138 ctx = unfi[newnode]
1138 ctx = unfi[newnode]
1139 parentphase = max(phase(p) for p in ctx.parents())
1139 parentphase = max(phase(p) for p in ctx.parents())
1140 if targetphase is None:
1140 if targetphase is None:
1141 oldphase = max(
1141 oldphase = max(
1142 unfi[oldnode].phase() for oldnode in precursors[newnode]
1142 unfi[oldnode].phase() for oldnode in precursors[newnode]
1143 )
1143 )
1144 newphase = max(oldphase, parentphase)
1144 newphase = max(oldphase, parentphase)
1145 else:
1145 else:
1146 newphase = max(targetphase, parentphase)
1146 newphase = max(targetphase, parentphase)
1147 newphases[newnode] = newphase
1147 newphases[newnode] = newphase
1148 if newphase > ctx.phase():
1148 if newphase > ctx.phase():
1149 toretract.setdefault(newphase, []).append(newnode)
1149 toretract.setdefault(newphase, []).append(newnode)
1150 elif newphase < ctx.phase():
1150 elif newphase < ctx.phase():
1151 toadvance.setdefault(newphase, []).append(newnode)
1151 toadvance.setdefault(newphase, []).append(newnode)
1152
1152
1153 with repo.transaction(b'cleanup') as tr:
1153 with repo.transaction(b'cleanup') as tr:
1154 # Move bookmarks
1154 # Move bookmarks
1155 bmarks = repo._bookmarks
1155 bmarks = repo._bookmarks
1156 bmarkchanges = []
1156 bmarkchanges = []
1157 for oldnode, newnode in moves.items():
1157 for oldnode, newnode in moves.items():
1158 oldbmarks = repo.nodebookmarks(oldnode)
1158 oldbmarks = repo.nodebookmarks(oldnode)
1159 if not oldbmarks:
1159 if not oldbmarks:
1160 continue
1160 continue
1161 from . import bookmarks # avoid import cycle
1161 from . import bookmarks # avoid import cycle
1162
1162
1163 repo.ui.debug(
1163 repo.ui.debug(
1164 b'moving bookmarks %r from %s to %s\n'
1164 b'moving bookmarks %r from %s to %s\n'
1165 % (
1165 % (
1166 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1166 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1167 hex(oldnode),
1167 hex(oldnode),
1168 hex(newnode),
1168 hex(newnode),
1169 )
1169 )
1170 )
1170 )
1171 # Delete divergent bookmarks being parents of related newnodes
1171 # Delete divergent bookmarks being parents of related newnodes
1172 deleterevs = repo.revs(
1172 deleterevs = repo.revs(
1173 b'parents(roots(%ln & (::%n))) - parents(%n)',
1173 b'parents(roots(%ln & (::%n))) - parents(%n)',
1174 allnewnodes,
1174 allnewnodes,
1175 newnode,
1175 newnode,
1176 oldnode,
1176 oldnode,
1177 )
1177 )
1178 deletenodes = _containsnode(repo, deleterevs)
1178 deletenodes = _containsnode(repo, deleterevs)
1179 for name in oldbmarks:
1179 for name in oldbmarks:
1180 bmarkchanges.append((name, newnode))
1180 bmarkchanges.append((name, newnode))
1181 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1181 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1182 bmarkchanges.append((b, None))
1182 bmarkchanges.append((b, None))
1183
1183
1184 if bmarkchanges:
1184 if bmarkchanges:
1185 bmarks.applychanges(repo, tr, bmarkchanges)
1185 bmarks.applychanges(repo, tr, bmarkchanges)
1186
1186
1187 for phase, nodes in toretract.items():
1187 for phase, nodes in toretract.items():
1188 phases.retractboundary(repo, tr, phase, nodes)
1188 phases.retractboundary(repo, tr, phase, nodes)
1189 for phase, nodes in toadvance.items():
1189 for phase, nodes in toadvance.items():
1190 phases.advanceboundary(repo, tr, phase, nodes)
1190 phases.advanceboundary(repo, tr, phase, nodes)
1191
1191
1192 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1192 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1193 # Obsolete or strip nodes
1193 # Obsolete or strip nodes
1194 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1194 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1195 # If a node is already obsoleted, and we want to obsolete it
1195 # If a node is already obsoleted, and we want to obsolete it
1196 # without a successor, skip that obsolete request since it's
1196 # without a successor, skip that obsolete request since it's
1197 # unnecessary. That's the "if s or not isobs(n)" check below.
1197 # unnecessary. That's the "if s or not isobs(n)" check below.
1198 # Also sort the nodes in topological order; that might be useful for
1198 # Also sort the nodes in topological order; that might be useful for
1199 # some obsstore logic.
1199 # some obsstore logic.
1200 # NOTE: the sorting might belong to createmarkers.
1200 # NOTE: the sorting might belong to createmarkers.
1201 torev = unfi.changelog.rev
1201 torev = unfi.changelog.rev
1202 sortfunc = lambda ns: torev(ns[0][0])
1202 sortfunc = lambda ns: torev(ns[0][0])
1203 rels = []
1203 rels = []
1204 for ns, s in sorted(replacements.items(), key=sortfunc):
1204 for ns, s in sorted(replacements.items(), key=sortfunc):
1205 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1205 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1206 rels.append(rel)
1206 rels.append(rel)
1207 if rels:
1207 if rels:
1208 obsolete.createmarkers(
1208 obsolete.createmarkers(
1209 repo, rels, operation=operation, metadata=metadata
1209 repo, rels, operation=operation, metadata=metadata
1210 )
1210 )
1211 elif phases.supportinternal(repo) and mayusearchived:
1211 elif phases.supportinternal(repo) and mayusearchived:
1212 # this assumes we do not have "unstable" nodes above the cleaned ones
1212 # this assumes we do not have "unstable" nodes above the cleaned ones
1213 allreplaced = set()
1213 allreplaced = set()
1214 for ns in replacements.keys():
1214 for ns in replacements.keys():
1215 allreplaced.update(ns)
1215 allreplaced.update(ns)
1216 if backup:
1216 if backup:
1217 from . import repair # avoid import cycle
1217 from . import repair # avoid import cycle
1218
1218
1219 node = min(allreplaced, key=repo.changelog.rev)
1219 node = min(allreplaced, key=repo.changelog.rev)
1220 repair.backupbundle(
1220 repair.backupbundle(
1221 repo, allreplaced, allreplaced, node, operation
1221 repo, allreplaced, allreplaced, node, operation
1222 )
1222 )
1223 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1223 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1224 else:
1224 else:
1225 from . import repair # avoid import cycle
1225 from . import repair # avoid import cycle
1226
1226
1227 tostrip = list(n for ns in replacements for n in ns)
1227 tostrip = list(n for ns in replacements for n in ns)
1228 if tostrip:
1228 if tostrip:
1229 repair.delayedstrip(
1229 repair.delayedstrip(
1230 repo.ui, repo, tostrip, operation, backup=backup
1230 repo.ui, repo, tostrip, operation, backup=backup
1231 )
1231 )
1232
1232
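# Editorial sketch (not part of scmutil.py): the shapes accepted by the
# `replacements` argument of cleanupnodes(), where a, b and c are
# hypothetical node ids:
#
#     cleanupnodes(repo, {a: [c]}, b'amend')      # a superseded by c
#     cleanupnodes(repo, {(a, b): [c]}, b'fold')  # a and b folded into c
#     cleanupnodes(repo, [a, b], b'prune')        # pruned, no successors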
1233
1233
1234 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1234 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1235 if opts is None:
1235 if opts is None:
1236 opts = {}
1236 opts = {}
1237 m = matcher
1237 m = matcher
1238 dry_run = opts.get(b'dry_run')
1238 dry_run = opts.get(b'dry_run')
1239 try:
1239 try:
1240 similarity = float(opts.get(b'similarity') or 0)
1240 similarity = float(opts.get(b'similarity') or 0)
1241 except ValueError:
1241 except ValueError:
1242 raise error.Abort(_(b'similarity must be a number'))
1242 raise error.Abort(_(b'similarity must be a number'))
1243 if similarity < 0 or similarity > 100:
1243 if similarity < 0 or similarity > 100:
1244 raise error.Abort(_(b'similarity must be between 0 and 100'))
1244 raise error.Abort(_(b'similarity must be between 0 and 100'))
1245 similarity /= 100.0
1245 similarity /= 100.0
1246
1246
1247 ret = 0
1247 ret = 0
1248
1248
1249 wctx = repo[None]
1249 wctx = repo[None]
1250 for subpath in sorted(wctx.substate):
1250 for subpath in sorted(wctx.substate):
1251 submatch = matchmod.subdirmatcher(subpath, m)
1251 submatch = matchmod.subdirmatcher(subpath, m)
1252 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1252 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1253 sub = wctx.sub(subpath)
1253 sub = wctx.sub(subpath)
1254 subprefix = repo.wvfs.reljoin(prefix, subpath)
1254 subprefix = repo.wvfs.reljoin(prefix, subpath)
1255 subuipathfn = subdiruipathfn(subpath, uipathfn)
1255 subuipathfn = subdiruipathfn(subpath, uipathfn)
1256 try:
1256 try:
1257 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1257 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1258 ret = 1
1258 ret = 1
1259 except error.LookupError:
1259 except error.LookupError:
1260 repo.ui.status(
1260 repo.ui.status(
1261 _(b"skipping missing subrepository: %s\n")
1261 _(b"skipping missing subrepository: %s\n")
1262 % uipathfn(subpath)
1262 % uipathfn(subpath)
1263 )
1263 )
1264
1264
1265 rejected = []
1265 rejected = []
1266
1266
1267 def badfn(f, msg):
1267 def badfn(f, msg):
1268 if f in m.files():
1268 if f in m.files():
1269 m.bad(f, msg)
1269 m.bad(f, msg)
1270 rejected.append(f)
1270 rejected.append(f)
1271
1271
1272 badmatch = matchmod.badmatch(m, badfn)
1272 badmatch = matchmod.badmatch(m, badfn)
1273 added, unknown, deleted, removed, forgotten = _interestingfiles(
1273 added, unknown, deleted, removed, forgotten = _interestingfiles(
1274 repo, badmatch
1274 repo, badmatch
1275 )
1275 )
1276
1276
1277 unknownset = set(unknown + forgotten)
1277 unknownset = set(unknown + forgotten)
1278 toprint = unknownset.copy()
1278 toprint = unknownset.copy()
1279 toprint.update(deleted)
1279 toprint.update(deleted)
1280 for abs in sorted(toprint):
1280 for abs in sorted(toprint):
1281 if repo.ui.verbose or not m.exact(abs):
1281 if repo.ui.verbose or not m.exact(abs):
1282 if abs in unknownset:
1282 if abs in unknownset:
1283 status = _(b'adding %s\n') % uipathfn(abs)
1283 status = _(b'adding %s\n') % uipathfn(abs)
1284 label = b'ui.addremove.added'
1284 label = b'ui.addremove.added'
1285 else:
1285 else:
1286 status = _(b'removing %s\n') % uipathfn(abs)
1286 status = _(b'removing %s\n') % uipathfn(abs)
1287 label = b'ui.addremove.removed'
1287 label = b'ui.addremove.removed'
1288 repo.ui.status(status, label=label)
1288 repo.ui.status(status, label=label)
1289
1289
1290 renames = _findrenames(
1290 renames = _findrenames(
1291 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1291 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1292 )
1292 )
1293
1293
1294 if not dry_run:
1294 if not dry_run:
1295 _markchanges(repo, unknown + forgotten, deleted, renames)
1295 _markchanges(repo, unknown + forgotten, deleted, renames)
1296
1296
1297 for f in rejected:
1297 for f in rejected:
1298 if f in m.files():
1298 if f in m.files():
1299 return 1
1299 return 1
1300 return ret
1300 return ret
1301
1301
1302
1302
1303 def marktouched(repo, files, similarity=0.0):
1303 def marktouched(repo, files, similarity=0.0):
1304 """Assert that files have somehow been operated upon. files are relative to
1304 """Assert that files have somehow been operated upon. files are relative to
1305 the repo root."""
1305 the repo root."""
1306 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1306 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1307 rejected = []
1307 rejected = []
1308
1308
1309 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1309 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1310
1310
1311 if repo.ui.verbose:
1311 if repo.ui.verbose:
1312 unknownset = set(unknown + forgotten)
1312 unknownset = set(unknown + forgotten)
1313 toprint = unknownset.copy()
1313 toprint = unknownset.copy()
1314 toprint.update(deleted)
1314 toprint.update(deleted)
1315 for abs in sorted(toprint):
1315 for abs in sorted(toprint):
1316 if abs in unknownset:
1316 if abs in unknownset:
1317 status = _(b'adding %s\n') % abs
1317 status = _(b'adding %s\n') % abs
1318 else:
1318 else:
1319 status = _(b'removing %s\n') % abs
1319 status = _(b'removing %s\n') % abs
1320 repo.ui.status(status)
1320 repo.ui.status(status)
1321
1321
1322 # TODO: We should probably have the caller pass in uipathfn and apply it to
1322 # TODO: We should probably have the caller pass in uipathfn and apply it to
1323 # the messages above too. legacyrelativevalue=True is consistent with how
1323 # the messages above too. legacyrelativevalue=True is consistent with how
1324 # it used to work.
1324 # it used to work.
1325 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1325 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1326 renames = _findrenames(
1326 renames = _findrenames(
1327 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1327 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1328 )
1328 )
1329
1329
1330 _markchanges(repo, unknown + forgotten, deleted, renames)
1330 _markchanges(repo, unknown + forgotten, deleted, renames)
1331
1331
1332 for f in rejected:
1332 for f in rejected:
1333 if f in m.files():
1333 if f in m.files():
1334 return 1
1334 return 1
1335 return 0
1335 return 0
1336
1336
1337
1337
1338 def _interestingfiles(repo, matcher):
1338 def _interestingfiles(repo, matcher):
1339 """Walk dirstate with matcher, looking for files that addremove would care
1339 """Walk dirstate with matcher, looking for files that addremove would care
1340 about.
1340 about.
1341
1341
1342 This is different from dirstate.status because it doesn't care about
1342 This is different from dirstate.status because it doesn't care about
1343 whether files are modified or clean."""
1343 whether files are modified or clean."""
1344 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1344 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1345 audit_path = pathutil.pathauditor(repo.root, cached=True)
1345 audit_path = pathutil.pathauditor(repo.root, cached=True)
1346
1346
1347 ctx = repo[None]
1347 ctx = repo[None]
1348 dirstate = repo.dirstate
1348 dirstate = repo.dirstate
1349 matcher = repo.narrowmatch(matcher, includeexact=True)
1349 matcher = repo.narrowmatch(matcher, includeexact=True)
1350 walkresults = dirstate.walk(
1350 walkresults = dirstate.walk(
1351 matcher,
1351 matcher,
1352 subrepos=sorted(ctx.substate),
1352 subrepos=sorted(ctx.substate),
1353 unknown=True,
1353 unknown=True,
1354 ignored=False,
1354 ignored=False,
1355 full=False,
1355 full=False,
1356 )
1356 )
1357 for abs, st in pycompat.iteritems(walkresults):
1357 for abs, st in pycompat.iteritems(walkresults):
1358 dstate = dirstate[abs]
1358 dstate = dirstate[abs]
1359 if dstate == b'?' and audit_path.check(abs):
1359 if dstate == b'?' and audit_path.check(abs):
1360 unknown.append(abs)
1360 unknown.append(abs)
1361 elif dstate != b'r' and not st:
1361 elif dstate != b'r' and not st:
1362 deleted.append(abs)
1362 deleted.append(abs)
1363 elif dstate == b'r' and st:
1363 elif dstate == b'r' and st:
1364 forgotten.append(abs)
1364 forgotten.append(abs)
1365 # for finding renames
1365 # for finding renames
1366 elif dstate == b'r' and not st:
1366 elif dstate == b'r' and not st:
1367 removed.append(abs)
1367 removed.append(abs)
1368 elif dstate == b'a':
1368 elif dstate == b'a':
1369 added.append(abs)
1369 added.append(abs)
1370
1370
1371 return added, unknown, deleted, removed, forgotten
1371 return added, unknown, deleted, removed, forgotten
1372
1372
1373
1373
1374 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1374 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1375 '''Find renames from removed files to added ones.'''
1375 '''Find renames from removed files to added ones.'''
1376 renames = {}
1376 renames = {}
1377 if similarity > 0:
1377 if similarity > 0:
1378 for old, new, score in similar.findrenames(
1378 for old, new, score in similar.findrenames(
1379 repo, added, removed, similarity
1379 repo, added, removed, similarity
1380 ):
1380 ):
1381 if (
1381 if (
1382 repo.ui.verbose
1382 repo.ui.verbose
1383 or not matcher.exact(old)
1383 or not matcher.exact(old)
1384 or not matcher.exact(new)
1384 or not matcher.exact(new)
1385 ):
1385 ):
1386 repo.ui.status(
1386 repo.ui.status(
1387 _(
1387 _(
1388 b'recording removal of %s as rename to %s '
1388 b'recording removal of %s as rename to %s '
1389 b'(%d%% similar)\n'
1389 b'(%d%% similar)\n'
1390 )
1390 )
1391 % (uipathfn(old), uipathfn(new), score * 100)
1391 % (uipathfn(old), uipathfn(new), score * 100)
1392 )
1392 )
1393 renames[new] = old
1393 renames[new] = old
1394 return renames
1394 return renames
1395
1395
1396
1396
1397 def _markchanges(repo, unknown, deleted, renames):
1397 def _markchanges(repo, unknown, deleted, renames):
1398 """Marks the files in unknown as added, the files in deleted as removed,
1398 """Marks the files in unknown as added, the files in deleted as removed,
1399 and the files in renames as copied."""
1399 and the files in renames as copied."""
1400 wctx = repo[None]
1400 wctx = repo[None]
1401 with repo.wlock():
1401 with repo.wlock():
1402 wctx.forget(deleted)
1402 wctx.forget(deleted)
1403 wctx.add(unknown)
1403 wctx.add(unknown)
1404 for new, old in pycompat.iteritems(renames):
1404 for new, old in pycompat.iteritems(renames):
1405 wctx.copy(old, new)
1405 wctx.copy(old, new)
1406
1406
1407
1407
1408 def getrenamedfn(repo, endrev=None):
1408 def getrenamedfn(repo, endrev=None):
1409 if copiesmod.usechangesetcentricalgo(repo):
1409 if copiesmod.usechangesetcentricalgo(repo):
1410
1410
1411 def getrenamed(fn, rev):
1411 def getrenamed(fn, rev):
1412 ctx = repo[rev]
1412 ctx = repo[rev]
1413 p1copies = ctx.p1copies()
1413 p1copies = ctx.p1copies()
1414 if fn in p1copies:
1414 if fn in p1copies:
1415 return p1copies[fn]
1415 return p1copies[fn]
1416 p2copies = ctx.p2copies()
1416 p2copies = ctx.p2copies()
1417 if fn in p2copies:
1417 if fn in p2copies:
1418 return p2copies[fn]
1418 return p2copies[fn]
1419 return None
1419 return None
1420
1420
1421 return getrenamed
1421 return getrenamed
1422
1422
1423 rcache = {}
1423 rcache = {}
1424 if endrev is None:
1424 if endrev is None:
1425 endrev = len(repo)
1425 endrev = len(repo)
1426
1426
1427 def getrenamed(fn, rev):
1427 def getrenamed(fn, rev):
1428 """looks up all renames for a file (up to endrev) the first
1428 """looks up all renames for a file (up to endrev) the first
1429 time the file is given. It indexes on the changerev and only
1429 time the file is given. It indexes on the changerev and only
1430 parses the manifest if linkrev != changerev.
1430 parses the manifest if linkrev != changerev.
1431 Returns rename info for fn at changerev rev."""
1431 Returns rename info for fn at changerev rev."""
1432 if fn not in rcache:
1432 if fn not in rcache:
1433 rcache[fn] = {}
1433 rcache[fn] = {}
1434 fl = repo.file(fn)
1434 fl = repo.file(fn)
1435 for i in fl:
1435 for i in fl:
1436 lr = fl.linkrev(i)
1436 lr = fl.linkrev(i)
1437 renamed = fl.renamed(fl.node(i))
1437 renamed = fl.renamed(fl.node(i))
1438 rcache[fn][lr] = renamed and renamed[0]
1438 rcache[fn][lr] = renamed and renamed[0]
1439 if lr >= endrev:
1439 if lr >= endrev:
1440 break
1440 break
1441 if rev in rcache[fn]:
1441 if rev in rcache[fn]:
1442 return rcache[fn][rev]
1442 return rcache[fn][rev]
1443
1443
1444 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1444 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1445 # filectx logic.
1445 # filectx logic.
1446 try:
1446 try:
1447 return repo[rev][fn].copysource()
1447 return repo[rev][fn].copysource()
1448 except error.LookupError:
1448 except error.LookupError:
1449 return None
1449 return None
1450
1450
1451 return getrenamed
1451 return getrenamed
1452
1452
1453
1453
1454 def getcopiesfn(repo, endrev=None):
1454 def getcopiesfn(repo, endrev=None):
1455 if copiesmod.usechangesetcentricalgo(repo):
1455 if copiesmod.usechangesetcentricalgo(repo):
1456
1456
1457 def copiesfn(ctx):
1457 def copiesfn(ctx):
1458 if ctx.p2copies():
1458 if ctx.p2copies():
1459 allcopies = ctx.p1copies().copy()
1459 allcopies = ctx.p1copies().copy()
1460 # There should be no overlap
1460 # There should be no overlap
1461 allcopies.update(ctx.p2copies())
1461 allcopies.update(ctx.p2copies())
1462 return sorted(allcopies.items())
1462 return sorted(allcopies.items())
1463 else:
1463 else:
1464 return sorted(ctx.p1copies().items())
1464 return sorted(ctx.p1copies().items())
1465
1465
1466 else:
1466 else:
1467 getrenamed = getrenamedfn(repo, endrev)
1467 getrenamed = getrenamedfn(repo, endrev)
1468
1468
1469 def copiesfn(ctx):
1469 def copiesfn(ctx):
1470 copies = []
1470 copies = []
1471 for fn in ctx.files():
1471 for fn in ctx.files():
1472 rename = getrenamed(fn, ctx.rev())
1472 rename = getrenamed(fn, ctx.rev())
1473 if rename:
1473 if rename:
1474 copies.append((fn, rename))
1474 copies.append((fn, rename))
1475 return copies
1475 return copies
1476
1476
1477 return copiesfn
1477 return copiesfn
1478
1478
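# Editorial sketch (not part of scmutil.py, assuming `repo` is an open
# localrepository): list the copies recorded in a changeset as
# (destination, source) pairs.
#
#     copiesfn = getcopiesfn(repo)
#     for dst, src in copiesfn(repo[b'tip']):
#         repo.ui.write(b'%s was copied from %s\n' % (dst, src))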
1479
1479
1480 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1480 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1481 """Update the dirstate to reflect the intent of copying src to dst. For
1481 """Update the dirstate to reflect the intent of copying src to dst. For
1482 various reasons, it might not end with dst being marked as copied from src.
1482 various reasons, it might not end with dst being marked as copied from src.
1483 """
1483 """
1484 origsrc = repo.dirstate.copied(src) or src
1484 origsrc = repo.dirstate.copied(src) or src
1485 if dst == origsrc: # copying back a copy?
1485 if dst == origsrc: # copying back a copy?
1486 if repo.dirstate[dst] not in b'mn' and not dryrun:
1486 if repo.dirstate[dst] not in b'mn' and not dryrun:
1487 repo.dirstate.normallookup(dst)
1487 repo.dirstate.normallookup(dst)
1488 else:
1488 else:
1489 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1489 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1490 if not ui.quiet:
1490 if not ui.quiet:
1491 ui.warn(
1491 ui.warn(
1492 _(
1492 _(
1493 b"%s has not been committed yet, so no copy "
1493 b"%s has not been committed yet, so no copy "
1494 b"data will be stored for %s.\n"
1494 b"data will be stored for %s.\n"
1495 )
1495 )
1496 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1496 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1497 )
1497 )
1498 if repo.dirstate[dst] in b'?r' and not dryrun:
1498 if repo.dirstate[dst] in b'?r' and not dryrun:
1499 wctx.add([dst])
1499 wctx.add([dst])
1500 elif not dryrun:
1500 elif not dryrun:
1501 wctx.copy(origsrc, dst)
1501 wctx.copy(origsrc, dst)
1502
1502
1503
1503
1504 def movedirstate(repo, newctx, match=None):
1504 def movedirstate(repo, newctx, match=None):
1505 """Move the dirstate to newctx and adjust it as necessary.
1505 """Move the dirstate to newctx and adjust it as necessary.
1506
1506
1507 A matcher can be provided as an optimization. It is probably a bug to pass
1507 A matcher can be provided as an optimization. It is probably a bug to pass
1508 a matcher that doesn't match all the differences between the parent of the
1508 a matcher that doesn't match all the differences between the parent of the
1509 working copy and newctx.
1509 working copy and newctx.
1510 """
1510 """
1511 oldctx = repo[b'.']
1511 oldctx = repo[b'.']
1512 ds = repo.dirstate
1512 ds = repo.dirstate
1513 copies = dict(ds.copies())
1513 copies = dict(ds.copies())
1514 ds.setparents(newctx.node(), nullid)
1514 ds.setparents(newctx.node(), nullid)
1515 s = newctx.status(oldctx, match=match)
1515 s = newctx.status(oldctx, match=match)
1516 for f in s.modified:
1516 for f in s.modified:
1517 if ds[f] == b'r':
1517 if ds[f] == b'r':
1518 # modified + removed -> removed
1518 # modified + removed -> removed
1519 continue
1519 continue
1520 ds.normallookup(f)
1520 ds.normallookup(f)
1521
1521
1522 for f in s.added:
1522 for f in s.added:
1523 if ds[f] == b'r':
1523 if ds[f] == b'r':
1524 # added + removed -> unknown
1524 # added + removed -> unknown
1525 ds.drop(f)
1525 ds.drop(f)
1526 elif ds[f] != b'a':
1526 elif ds[f] != b'a':
1527 ds.add(f)
1527 ds.add(f)
1528
1528
1529 for f in s.removed:
1529 for f in s.removed:
1530 if ds[f] == b'a':
1530 if ds[f] == b'a':
1531 # removed + added -> normal
1531 # removed + added -> normal
1532 ds.normallookup(f)
1532 ds.normallookup(f)
1533 elif ds[f] != b'r':
1533 elif ds[f] != b'r':
1534 ds.remove(f)
1534 ds.remove(f)
1535
1535
1536 # Merge old parent and old working dir copies
1536 # Merge old parent and old working dir copies
1537 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1537 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1538 oldcopies.update(copies)
1538 oldcopies.update(copies)
1539 copies = {
1539 copies = {
1540 dst: oldcopies.get(src, src)
1540 dst: oldcopies.get(src, src)
1541 for dst, src in pycompat.iteritems(oldcopies)
1541 for dst, src in pycompat.iteritems(oldcopies)
1542 }
1542 }
1543 # Adjust the dirstate copies
1543 # Adjust the dirstate copies
1544 for dst, src in pycompat.iteritems(copies):
1544 for dst, src in pycompat.iteritems(copies):
1545 if src not in newctx or dst in newctx or ds[dst] != b'a':
1545 if src not in newctx or dst in newctx or ds[dst] != b'a':
1546 src = None
1546 src = None
1547 ds.copy(src, dst)
1547 ds.copy(src, dst)
1548 repo._quick_access_changeid_invalidate()
1548 repo._quick_access_changeid_invalidate()
1549
1549
1550
1550
1551 def filterrequirements(requirements):
1551 def filterrequirements(requirements):
1552 """filters the requirements into two sets:
1552 """filters the requirements into two sets:
1553
1553
1554 wcreq: requirements which should be written in .hg/requires
1554 wcreq: requirements which should be written in .hg/requires
1555 storereq: requirements which should be written in .hg/store/requires
1555 storereq: requirements which should be written in .hg/store/requires
1556
1556
1557 Returns (wcreq, storereq)
1557 Returns (wcreq, storereq)
1558 """
1558 """
1559 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1559 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1560 wc, store = set(), set()
1560 wc, store = set(), set()
1561 for r in requirements:
1561 for r in requirements:
1562 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1562 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1563 wc.add(r)
1563 wc.add(r)
1564 else:
1564 else:
1565 store.add(r)
1565 store.add(r)
1566 return wc, store
1566 return wc, store
1567 return requirements, None
1567 return requirements, None
1568
1568
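# Editorial sketch (not part of scmutil.py): without the share-safe
# requirement everything stays together; with it, the set is split.
def _filterrequirements_example():
    legacy = {b'revlogv1', b'store', b'fncache'}
    assert filterrequirements(legacy) == (legacy, None)
    wc, store = filterrequirements(
        legacy | {requirementsmod.SHARESAFE_REQUIREMENT}
    )
    # wc keeps the members of WORKING_DIR_REQUIREMENTS; the rest lands
    # in store.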
1569
1569
1570 def istreemanifest(repo):
1570 def istreemanifest(repo):
1571 """ returns whether the repository is using treemanifest or not """
1571 """ returns whether the repository is using treemanifest or not """
1572 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1572 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1573
1573
1574
1574
1575 def writereporequirements(repo, requirements=None):
1575 def writereporequirements(repo, requirements=None):
1576 """ writes requirements for the repo to .hg/requires """
1576 """writes requirements for the repo
1577
1578 Requirements are written to .hg/requires and .hg/store/requires based
1579 on whether share-safe mode is enabled and on which requirements are
1580 working-directory requirements and which are store requirements.
1581 """
1577 if requirements:
1582 if requirements:
1578 repo.requirements = requirements
1583 repo.requirements = requirements
1579 wcreq, storereq = filterrequirements(repo.requirements)
1584 wcreq, storereq = filterrequirements(repo.requirements)
1580 if wcreq is not None:
1585 if wcreq is not None:
1581 writerequires(repo.vfs, wcreq)
1586 writerequires(repo.vfs, wcreq)
1582 if storereq is not None:
1587 if storereq is not None:
1583 writerequires(repo.svfs, storereq)
1588 writerequires(repo.svfs, storereq)
1584 elif repo.ui.configbool(b'format', b'usestore'):
1589 elif repo.ui.configbool(b'format', b'usestore'):
1585 # only remove store requires if we are using store
1590 # only remove store requires if we are using store
1586 repo.svfs.tryunlink(b'requires')
1591 repo.svfs.tryunlink(b'requires')
1587
1592
1588
1593
1589 def writerequires(opener, requirements):
1594 def writerequires(opener, requirements):
1590 with opener(b'requires', b'w', atomictemp=True) as fp:
1595 with opener(b'requires', b'w', atomictemp=True) as fp:
1591 for r in sorted(requirements):
1596 for r in sorted(requirements):
1592 fp.write(b"%s\n" % r)
1597 fp.write(b"%s\n" % r)
1593
1598
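# Editorial sketch (not part of scmutil.py): adding a requirement and
# persisting it. With share-safe enabled, working-directory requirements go
# to .hg/requires and store requirements to .hg/store/requires; otherwise
# everything is written to .hg/requires.
#
#     reqs = set(repo.requirements)
#     reqs.add(b'my-new-requirement')  # hypothetical requirement name
#     writereporequirements(repo, reqs)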
1594
1599
1595 class filecachesubentry(object):
1600 class filecachesubentry(object):
1596 def __init__(self, path, stat):
1601 def __init__(self, path, stat):
1597 self.path = path
1602 self.path = path
1598 self.cachestat = None
1603 self.cachestat = None
1599 self._cacheable = None
1604 self._cacheable = None
1600
1605
1601 if stat:
1606 if stat:
1602 self.cachestat = filecachesubentry.stat(self.path)
1607 self.cachestat = filecachesubentry.stat(self.path)
1603
1608
1604 if self.cachestat:
1609 if self.cachestat:
1605 self._cacheable = self.cachestat.cacheable()
1610 self._cacheable = self.cachestat.cacheable()
1606 else:
1611 else:
1607 # None means we don't know yet
1612 # None means we don't know yet
1608 self._cacheable = None
1613 self._cacheable = None
1609
1614
1610 def refresh(self):
1615 def refresh(self):
1611 if self.cacheable():
1616 if self.cacheable():
1612 self.cachestat = filecachesubentry.stat(self.path)
1617 self.cachestat = filecachesubentry.stat(self.path)
1613
1618
1614 def cacheable(self):
1619 def cacheable(self):
1615 if self._cacheable is not None:
1620 if self._cacheable is not None:
1616 return self._cacheable
1621 return self._cacheable
1617
1622
1618 # we don't know yet, assume it is for now
1623 # we don't know yet, assume it is for now
1619 return True
1624 return True
1620
1625
1621 def changed(self):
1626 def changed(self):
1622 # no point in going further if we can't cache it
1627 # no point in going further if we can't cache it
1623 if not self.cacheable():
1628 if not self.cacheable():
1624 return True
1629 return True
1625
1630
1626 newstat = filecachesubentry.stat(self.path)
1631 newstat = filecachesubentry.stat(self.path)
1627
1632
1628 # we may not know if it's cacheable yet, check again now
1633 # we may not know if it's cacheable yet, check again now
1629 if newstat and self._cacheable is None:
1634 if newstat and self._cacheable is None:
1630 self._cacheable = newstat.cacheable()
1635 self._cacheable = newstat.cacheable()
1631
1636
1632 # check again
1637 # check again
1633 if not self._cacheable:
1638 if not self._cacheable:
1634 return True
1639 return True
1635
1640
1636 if self.cachestat != newstat:
1641 if self.cachestat != newstat:
1637 self.cachestat = newstat
1642 self.cachestat = newstat
1638 return True
1643 return True
1639 else:
1644 else:
1640 return False
1645 return False
1641
1646
1642 @staticmethod
1647 @staticmethod
1643 def stat(path):
1648 def stat(path):
1644 try:
1649 try:
1645 return util.cachestat(path)
1650 return util.cachestat(path)
1646 except OSError as e:
1651 except OSError as e:
1647 if e.errno != errno.ENOENT:
1652 if e.errno != errno.ENOENT:
1648 raise
1653 raise
1649
1654
1650
1655
1651 class filecacheentry(object):
1656 class filecacheentry(object):
1652 def __init__(self, paths, stat=True):
1657 def __init__(self, paths, stat=True):
1653 self._entries = []
1658 self._entries = []
1654 for path in paths:
1659 for path in paths:
1655 self._entries.append(filecachesubentry(path, stat))
1660 self._entries.append(filecachesubentry(path, stat))
1656
1661
1657 def changed(self):
1662 def changed(self):
1658 '''true if any entry has changed'''
1663 '''true if any entry has changed'''
1659 for entry in self._entries:
1664 for entry in self._entries:
1660 if entry.changed():
1665 if entry.changed():
1661 return True
1666 return True
1662 return False
1667 return False
1663
1668
1664 def refresh(self):
1669 def refresh(self):
1665 for entry in self._entries:
1670 for entry in self._entries:
1666 entry.refresh()
1671 entry.refresh()
1667
1672
1668
1673
1669 class filecache(object):
1674 class filecache(object):
1670 """A property-like decorator that tracks files under .hg/ for updates.
1675 """A property-like decorator that tracks files under .hg/ for updates.
1671
1676
1672 On first access, the files defined as arguments are stat()ed and the
1677 On first access, the files defined as arguments are stat()ed and the
1673 results cached. The decorated function is called. The results are stashed
1678 results cached. The decorated function is called. The results are stashed
1674 away in a ``_filecache`` dict on the object whose method is decorated.
1679 away in a ``_filecache`` dict on the object whose method is decorated.
1675
1680
1676 On subsequent access, the cached result is used, as it has been set in the
1681 On subsequent access, the cached result is used, as it has been set in the
1677 instance dictionary.
1682 instance dictionary.
1678
1683
1679 On external property set/delete operations, the caller must update the
1684 On external property set/delete operations, the caller must update the
1680 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1685 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1681 instead of directly setting <attr>.
1686 instead of directly setting <attr>.
1682
1687
1683 When using the property API, the cached data is always used if available.
1688 When using the property API, the cached data is always used if available.
1684 No stat() is performed to check if the file has changed.
1689 No stat() is performed to check if the file has changed.
1685
1690
1686 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1691 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1687 can populate an entry before the property's getter is called. In this case,
1692 can populate an entry before the property's getter is called. In this case,
1688 entries in ``_filecache`` will be used during property operations,
1693 entries in ``_filecache`` will be used during property operations,
1689 if available. If the underlying file changes, it is up to external callers
1694 if available. If the underlying file changes, it is up to external callers
1690 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1695 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1691 method result as well as possibly calling ``del obj._filecache[attr]`` to
1696 method result as well as possibly calling ``del obj._filecache[attr]`` to
1692 remove the ``filecacheentry``.
1697 remove the ``filecacheentry``.
1693 """
1698 """
1694
1699
1695 def __init__(self, *paths):
1700 def __init__(self, *paths):
1696 self.paths = paths
1701 self.paths = paths
1697
1702
1698 def join(self, obj, fname):
1703 def join(self, obj, fname):
1699 """Used to compute the runtime path of a cached file.
1704 """Used to compute the runtime path of a cached file.
1700
1705
1701 Users should subclass filecache and provide their own version of this
1706 Users should subclass filecache and provide their own version of this
1702 function to call the appropriate join function on 'obj' (an instance
1707 function to call the appropriate join function on 'obj' (an instance
1703 of the class that its member function was decorated).
1708 of the class that its member function was decorated).
1704 """
1709 """
1705 raise NotImplementedError
1710 raise NotImplementedError
1706
1711
1707 def __call__(self, func):
1712 def __call__(self, func):
1708 self.func = func
1713 self.func = func
1709 self.sname = func.__name__
1714 self.sname = func.__name__
1710 self.name = pycompat.sysbytes(self.sname)
1715 self.name = pycompat.sysbytes(self.sname)
1711 return self
1716 return self
1712
1717
1713 def __get__(self, obj, type=None):
1718 def __get__(self, obj, type=None):
1714 # if accessed on the class, return the descriptor itself.
1719 # if accessed on the class, return the descriptor itself.
1715 if obj is None:
1720 if obj is None:
1716 return self
1721 return self
1717
1722
1718 assert self.sname not in obj.__dict__
1723 assert self.sname not in obj.__dict__
1719
1724
1720 entry = obj._filecache.get(self.name)
1725 entry = obj._filecache.get(self.name)
1721
1726
1722 if entry:
1727 if entry:
1723 if entry.changed():
1728 if entry.changed():
1724 entry.obj = self.func(obj)
1729 entry.obj = self.func(obj)
1725 else:
1730 else:
1726 paths = [self.join(obj, path) for path in self.paths]
1731 paths = [self.join(obj, path) for path in self.paths]
1727
1732
1728 # We stat -before- creating the object so our cache doesn't lie if
1733 # We stat -before- creating the object so our cache doesn't lie if
1729 # a writer modifies it between the time we read and stat
1734 # a writer modifies it between the time we read and stat
1730 entry = filecacheentry(paths, True)
1735 entry = filecacheentry(paths, True)
1731 entry.obj = self.func(obj)
1736 entry.obj = self.func(obj)
1732
1737
1733 obj._filecache[self.name] = entry
1738 obj._filecache[self.name] = entry
1734
1739
1735 obj.__dict__[self.sname] = entry.obj
1740 obj.__dict__[self.sname] = entry.obj
1736 return entry.obj
1741 return entry.obj
1737
1742
1738 # don't implement __set__(), which would make __dict__ lookup as slow as
1743 # don't implement __set__(), which would make __dict__ lookup as slow as
1739 # function call.
1744 # function call.
1740
1745
1741 def set(self, obj, value):
1746 def set(self, obj, value):
1742 if self.name not in obj._filecache:
1747 if self.name not in obj._filecache:
1743 # we add an entry for the missing value because X in __dict__
1748 # we add an entry for the missing value because X in __dict__
1744 # implies X in _filecache
1749 # implies X in _filecache
1745 paths = [self.join(obj, path) for path in self.paths]
1750 paths = [self.join(obj, path) for path in self.paths]
1746 ce = filecacheentry(paths, False)
1751 ce = filecacheentry(paths, False)
1747 obj._filecache[self.name] = ce
1752 obj._filecache[self.name] = ce
1748 else:
1753 else:
1749 ce = obj._filecache[self.name]
1754 ce = obj._filecache[self.name]
1750
1755
1751 ce.obj = value # update cached copy
1756 ce.obj = value # update cached copy
1752 obj.__dict__[self.sname] = value # update copy returned by obj.x
1757 obj.__dict__[self.sname] = value # update copy returned by obj.x
1753
1758
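# Editorial sketch (not part of scmutil.py): users subclass filecache so that
# join() can resolve paths against the decorated object, mirroring the
# pattern of repofilecache in localrepo.py.
class _examplefilecache(filecache):
    def join(self, obj, fname):
        return obj.vfs.join(fname)


class _examplething(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}

    @_examplefilecache(b'somefile')
    def data(self):
        # recomputed only when the tracked file changes on disk
        return self.vfs.read(b'somefile')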
1754
1759
1755 def extdatasource(repo, source):
1760 def extdatasource(repo, source):
1756 """Gather a map of rev -> value dict from the specified source
1761 """Gather a map of rev -> value dict from the specified source
1757
1762
1758 A source spec is treated as a URL, with a special case shell: type
1763 A source spec is treated as a URL, with a special case shell: type
1759 for parsing the output from a shell command.
1764 for parsing the output from a shell command.
1760
1765
1761 The data is parsed as a series of newline-separated records where
1766 The data is parsed as a series of newline-separated records where
1762 each record is a revision specifier optionally followed by a space
1767 each record is a revision specifier optionally followed by a space
1763 and a freeform string value. If the revision is known locally, it
1768 and a freeform string value. If the revision is known locally, it
1764 is converted to a rev; otherwise the record is skipped.
1769 is converted to a rev; otherwise the record is skipped.
1765
1770
1766 Note that both key and value are treated as UTF-8 and converted to
1771 Note that both key and value are treated as UTF-8 and converted to
1767 the local encoding. This allows uniformity between local and
1772 the local encoding. This allows uniformity between local and
1768 remote data sources.
1773 remote data sources.
1769 """
1774 """
1770
1775
1771 spec = repo.ui.config(b"extdata", source)
1776 spec = repo.ui.config(b"extdata", source)
1772 if not spec:
1777 if not spec:
1773 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1778 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1774
1779
1775 data = {}
1780 data = {}
1776 src = proc = None
1781 src = proc = None
1777 try:
1782 try:
1778 if spec.startswith(b"shell:"):
1783 if spec.startswith(b"shell:"):
1779 # external commands should be run relative to the repo root
1784 # external commands should be run relative to the repo root
1780 cmd = spec[6:]
1785 cmd = spec[6:]
1781 proc = subprocess.Popen(
1786 proc = subprocess.Popen(
1782 procutil.tonativestr(cmd),
1787 procutil.tonativestr(cmd),
1783 shell=True,
1788 shell=True,
1784 bufsize=-1,
1789 bufsize=-1,
1785 close_fds=procutil.closefds,
1790 close_fds=procutil.closefds,
1786 stdout=subprocess.PIPE,
1791 stdout=subprocess.PIPE,
1787 cwd=procutil.tonativestr(repo.root),
1792 cwd=procutil.tonativestr(repo.root),
1788 )
1793 )
1789 src = proc.stdout
1794 src = proc.stdout
1790 else:
1795 else:
1791 # treat as a URL or file
1796 # treat as a URL or file
1792 src = url.open(repo.ui, spec)
1797 src = url.open(repo.ui, spec)
1793 for l in src:
1798 for l in src:
1794 if b" " in l:
1799 if b" " in l:
1795 k, v = l.strip().split(b" ", 1)
1800 k, v = l.strip().split(b" ", 1)
1796 else:
1801 else:
1797 k, v = l.strip(), b""
1802 k, v = l.strip(), b""
1798
1803
1799 k = encoding.tolocal(k)
1804 k = encoding.tolocal(k)
1800 try:
1805 try:
1801 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1806 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1802 except (error.LookupError, error.RepoLookupError, error.InputError):
1807 except (error.LookupError, error.RepoLookupError, error.InputError):
1803 pass # we ignore data for nodes that don't exist locally
1808 pass # we ignore data for nodes that don't exist locally
1804 finally:
1809 finally:
1805 if proc:
1810 if proc:
1806 try:
1811 try:
1807 proc.communicate()
1812 proc.communicate()
1808 except ValueError:
1813 except ValueError:
1809 # This happens if we started iterating src and then
1814 # This happens if we started iterating src and then
1810 # get a parse error on a line. It should be safe to ignore.
1815 # get a parse error on a line. It should be safe to ignore.
1811 pass
1816 pass
1812 if src:
1817 if src:
1813 src.close()
1818 src.close()
1814 if proc and proc.returncode != 0:
1819 if proc and proc.returncode != 0:
1815 raise error.Abort(
1820 raise error.Abort(
1816 _(b"extdata command '%s' failed: %s")
1821 _(b"extdata command '%s' failed: %s")
1817 % (cmd, procutil.explainexit(proc.returncode))
1822 % (cmd, procutil.explainexit(proc.returncode))
1818 )
1823 )
1819
1824
1820 return data
1825 return data
1821
1826
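# Editorial sketch (not part of scmutil.py): with a configuration such as
#
#     [extdata]
#     bugref = shell:cat .hg/bugrefs.txt
#
# where each output line is "<revspec> <free-form value>",
# extdatasource(repo, b'bugref') returns {rev: value} for the revisions that
# exist locally; records for unknown revisions are silently skipped.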
1822
1827


class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
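

# A minimal usage sketch (hedged): callers typically obtain an instance via
# ui.makeprogress() rather than constructing this class directly; the topic
# and the iterable below are hypothetical.
#
#   with ui.makeprogress(b'scanning', unit=b'files', total=len(files)) as p:
#       for f in files:
#           p.increment(item=f)
#   # leaving the context manager calls complete(), which clears the bar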


def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')
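

# For reference, a hedged sketch of the configuration these helpers read;
# with an hgrc like the following, both helpers return True:
#
#   [format]
#   generaldelta = yes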


class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumeric and start with a letter; values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of the file
        should be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines, which only contain '\n' and therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write a key=>value mapping to a file
        data is a dict. Keys must be alphanumeric and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as-is, not in key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
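

# A hedged round-trip sketch; the vfs instance and the file name b'mystate'
# are hypothetical:
#
#   kvfile = simplekeyvaluefile(repo.vfs, b'mystate')
#   kvfile.write({b'version': b'1', b'step': b'done'}, firstline=b'v1')
#   data = kvfile.read(firstlinenonkeyval=True)
#   # data == {b'__firstline': b'v1', b'version': b'1', b'step': b'done'}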


_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()
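
# A hedged sketch of how an extension might register a prefetch function;
# the extension name b'myext' and the fetch logic are hypothetical:
#
#   def _prefetch(repo, revmatches):
#       for rev, match in revmatches:
#           pass  # fetch the files selected by `match` at `rev`
#
#   fileprefetchhooks.add(b'myext', _prefetch)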


# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead.
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

    if obsolete.isenabled(
        repo, obsolete.createmarkersopt
    ) and repo.ui.configbool(
        b'experimental', b'evolution.report-instabilities'
    ):
        instabilitytypes = [
            (b'orphan', b'orphan'),
            (b'phase-divergent', b'phasedivergent'),
            (b'content-divergent', b'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(
                    set(obsolete.getrevs(repo, revset)) - filtered
                )
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)

        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (
                    newinstabilitycounts[instability]
                    - oldinstabilitycounts[instability]
                )
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally, but the term has not
                # been exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))
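

# A hedged sketch of the wiring: the core registers these callbacks when a
# transaction is opened, so the summary appears once it closes. Roughly:
#
#   with repo.transaction(b'pull') as tr:
#       registersummarycallback(repo, tr, txnname=b'pull')
#       ...  # apply incoming changegroup, phases, obsmarkers
#   # on close, the callbacks print lines such as (revision range hypothetical)
#   #   added 2 changesets with 3 changes to 3 files
#   #   new changesets 1234abcd5678:9876fedc4321 (2 drafts)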


def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)
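

# A hedged sketch of such a wrapper; the extra hint text is hypothetical,
# and extensions.wrapfunction is the usual wrapping mechanism:
#
#   from mercurial import extensions, scmutil
#
#   def _withfixhint(orig, delta, instability):
#       msg = orig(delta, instability)
#       if msg:
#           msg += _(b"(see 'hg help evolution' for how to resolve them)\n")
#       return msg
#
#   extensions.wrapfunction(scmutil, 'getinstabilitymessage', _withfixhint)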


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
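

# Behavior note (hedged example): with a non-verbose ui and, say, seven
# nodes, this returns the first four short hashes joined by spaces followed
# by b' and 3 others'; with --verbose, all seven are listed.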


def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(filtername)
    # possible improvement: we could restrict the check to affected branches
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate branch/tags caches until
    # we can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)


def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark

    If the bookmarked revision isn't a head, an empty set will be returned.
    """
    return repo.revs(format_bookmark_revspec(mark))


def format_bookmark_revspec(mark):
    """Build a revset expression to select revisions reachable by a given
    bookmark"""
    mark = b'literal:' + mark
    return revsetlang.formatspec(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
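

# A hedged illustration: for a bookmark named b'feature', the generated
# expression is equivalent to
#
#   ancestors(bookmark('literal:feature'))
#     - ancestors(head() and not bookmark('literal:feature'))
#     - ancestors(bookmark() and not bookmark('literal:feature'))
#
# i.e. changesets reachable from the bookmark but not from other heads or
# from other bookmarks.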