errors: use InputError for bad --similarity value...
Martin von Zweigbergk
r48845:5b89626c default
@@ -1,2289 +1,2289 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    short,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    requirements as requirementsmod,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)

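# Illustrative sketch, not part of the original module: a ``status`` struct
# unpacks into its seven file lists in declaration order. The helper name
# below is hypothetical.
def _status_example(st):
    modified, added, removed, deleted, unknown, ignored, clean = st
    # e.g. count the files the working copy has touched
    return len(modified) + len(added) + len(removed)
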
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.RepoError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if msg is None:
            ui.error(b"\n")
        elif not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Error as inst:
        if inst.detailed_exit_code is not None:
            detailed_exit_code = inst.detailed_exit_code
        if inst.coarse_exit_code is not None:
            coarse_exit_code = inst.coarse_exit_code
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code

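# Illustrative sketch, not part of the original module: how a dispatcher-like
# caller might run a command body under callcatch(). The names below are
# hypothetical; ``ui`` is assumed to be a ``ui.ui`` instance.
def _callcatch_example(ui):
    def body():
        ui.status(b'doing work\n')
        return 0

    # returns body()'s result, or an exit code derived from the exception;
    # with ui.detailed-exit-code=yes the detailed code is returned instead
    return callcatch(ui, body)
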
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.InputError(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )

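# Illustrative sketch, not part of the original module: which label names
# checknewlabel() accepts and rejects. The helper name is hypothetical.
def _checknewlabel_example(repo):
    checknewlabel(repo, b'my-feature', b'bookmark')  # accepted
    # Each of the following would raise error.InputError:
    #   b'tip'   - reserved name
    #   b'123'   - integers cannot be names
    #   b'a:b'   - b':' is disallowed
    #   b' x '   - leading/trailing whitespace is disallowed
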
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.InputError(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

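# Illustrative sketch, not part of the original module: auditing a batch of
# to-be-added files for case-folding collisions. The helper name is
# hypothetical; with abort=False the auditor warns instead of raising.
def _casecollision_example(ui, repo, filenames):
    auditor = casecollisionauditor(ui, False, repo.dirstate)
    for f in filenames:
        auditor(f)  # warns if f collides case-insensitively
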
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = cl._filteredrevs_hashcache.get(maxrev)
    if not key:
        revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            key = s.digest()
        cl._filteredrevs_hashcache[maxrev] = key
    return key

def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

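# Illustrative sketch, not part of the original module: collecting every
# repository root under a directory, following symlinks. The path is a
# made-up example.
def _walkrepos_example():
    return list(walkrepos(b'/srv/hg', followsym=True))
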
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return ctx.repo().nodeconstants.wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))

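# Illustrative sketch, not part of the original module: formatting the tip
# changeset as '{rev}:{node}'. Without --debug this yields the short form,
# e.g. b'4:1e4e1b8f71e0'; with --debug the full 40-digit hex node is used.
def _formatrevnode_example(repo):
    ctx = repo[b'tip']
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
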
def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False

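# Illustrative sketch, not part of the original module: the cases
# mayberevnum() distinguishes, assuming a non-empty repo. The helper name
# is hypothetical.
def _mayberevnum_example(repo):
    assert mayberevnum(repo, b'0')  # b'0' is a valid revnum
    assert not mayberevnum(repo, b'00')  # leading zero: never a revnum
    assert not mayberevnum(repo, b'abc')  # not an integer at all
    assert not mayberevnum(repo, b'%d' % (len(repo) + 1))  # beyond tip
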
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)

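# Illustrative sketch, not part of the original module: revsymbol() resolves
# single symbols only. The helper name is hypothetical.
def _revsymbol_example(repo):
    ctx = revsymbol(repo, b'.')  # also: b'tip', b'0', hashes, bookmarks
    # a full revset such as b'max(public())' is not evaluated here and
    # fails with a lookup error; use revrange()/revsingle() for revsets
    return ctx
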
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.InputError(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.InputError(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.InputError(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

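# Illustrative sketch, not part of the original module: combining several
# revsets with revrange(). The branch name is a made-up example; note the
# user-supplied value is escaped with formatspec() first.
def _revrange_example(repo):
    spec = revsetlang.formatspec(b'branch(%s)', b'stable')
    revs = revrange(repo, [spec, b'bookmark()'])  # union of both revsets
    return [repo[r] for r in revs]
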
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2

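# Illustrative sketch, not part of the original module: the generator yields
# 8, 16, 32, 64, 128, 256, 512 and then 512 forever.
def _increasingwindows_example():
    sizes = []
    for size in increasingwindows():
        sizes.append(size)
        if len(sizes) == 8:
            break
    return sizes  # [8, 16, 32, 64, 128, 256, 512, 512]
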
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()

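# Illustrative sketch, not part of the original module: driving
# walkchangerevs() with a match-everything file matcher and a prepare
# callback. All names below are hypothetical.
def _walkchangerevs_example(repo, revs):
    prepared = []

    def makefilematcher(ctx):
        return matchall(repo)

    def prepare(ctx, fmatch):
        # called in forward order within each window, before ctx is yielded
        prepared.append(ctx.rev())

    return list(walkchangerevs(repo, revs, makefilematcher, prepare))
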
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
828 """Return a function that produced paths for presenting to the user.
828 """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath

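# Illustrative sketch, not part of the original module: forcing cwd-relative
# output regardless of the ui.relative-paths setting. The helper name is
# hypothetical.
def _uipathfn_example(repo, files):
    uipathfn = getuipathfn(repo, forcerelativevalue=True)
    return [uipathfn(f) for f in files]
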
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    """Expand bare globs when running on windows.
    On posix we assume it has already been done by sh."""
884 if not util.expandglobs:
884 if not util.expandglobs:
885 return list(pats)
885 return list(pats)
886 ret = []
886 ret = []
887 for kindpat in pats:
887 for kindpat in pats:
888 kind, pat = matchmod._patsplit(kindpat, None)
888 kind, pat = matchmod._patsplit(kindpat, None)
889 if kind is None:
889 if kind is None:
890 try:
890 try:
891 globbed = glob.glob(pat)
891 globbed = glob.glob(pat)
892 except re.error:
892 except re.error:
893 globbed = [pat]
893 globbed = [pat]
894 if globbed:
894 if globbed:
895 ret.extend(globbed)
895 ret.extend(globbed)
896 continue
896 continue
897 ret.append(kindpat)
897 ret.append(kindpat)
898 return ret
898 return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' files

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
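

# For illustration (hypothetical values): with ``[ui] origbackuppath =
# .hg/origbackups`` set, backuppath(ui, repo, b'dir/file') returns
# ``<repo>/.hg/origbackups/dir/file``; with no config it falls back to
# ``<repo>/dir/file.orig``.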


class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move the working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in the obsmarkers
    if obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non-tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = repo.nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, which might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned
            # ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.InputError(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.InputError(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0
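    # E.g. --similarity 80 becomes 0.8 here; non-numeric values or values
    # outside 0..100 raise InputError above.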

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
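    # dirstate codes checked below: b'?' untracked, b'r' removed, b'a' added;
    # ``st`` is the stat result from the walk, falsy when the file is missing
    # from the working directory.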
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fall back to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.set_tracked(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    s = newctx.status(oldctx, match=match)

    for f in s.modified:
        ds.update_file_p1(f, p1_tracked=True)

    for f in s.added:
        ds.update_file_p1(f, p1_tracked=False)

    for f in s.removed:
        ds.update_file_p1(f, p1_tracked=True)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written to .hg/requires
    storereq: requirements which should be written to .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None
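

# For example: when the share-safe requirement is present, every requirement
# listed in requirementsmod.WORKING_DIR_REQUIREMENTS goes into wcreq and the
# rest into storereq; otherwise the whole set is returned with storereq=None.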


def istreemanifest(repo):
    """returns whether the repository is using treemanifest or not"""
    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements


def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)
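

# The resulting file is plain text, one requirement per line in sorted order;
# a typical .hg/requires might read ``dotencode``, ``fncache``,
# ``generaldelta``, ``revlogv1``, ``store`` on successive lines.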


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified the file between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # a function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x


def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # got a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data


class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
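

# Callers usually obtain a progress instance through ui.makeprogress(), which
# supplies the ``updatebar`` callback; a usage sketch:
#
#     with ui.makeprogress(b'scanning', unit=b'files', total=42) as p:
#         for f in files:  # hypothetical iterable
#             p.increment(item=f)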


def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumeric and start with a letter, values must not
    contain '\n' characters"""
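
    # An illustrative file body (hypothetical values):
    #
    #     key1=value1
    #     key2=value2
    #
    # optionally preceded by a free-form first line (see read() and write()).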

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of the file
        should be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' and therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumeric and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
1914 e = b"keys must start with a letter in a key-value file"
1914 e = b"keys must start with a letter in a key-value file"
1915 raise error.ProgrammingError(e)
1915 raise error.ProgrammingError(e)
1916 if not k.isalnum():
1916 if not k.isalnum():
1917 e = b"invalid key name in a simple key-value file"
1917 e = b"invalid key name in a simple key-value file"
1918 raise error.ProgrammingError(e)
1918 raise error.ProgrammingError(e)
1919 if b'\n' in v:
1919 if b'\n' in v:
1920 e = b"invalid value in a simple key-value file"
1920 e = b"invalid value in a simple key-value file"
1921 raise error.ProgrammingError(e)
1921 raise error.ProgrammingError(e)
1922 lines.append(b"%s=%s\n" % (k, v))
1922 lines.append(b"%s=%s\n" % (k, v))
1923 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1923 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1924 fp.write(b''.join(lines))
1924 fp.write(b''.join(lines))
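
# A minimal usage sketch (an illustration, not part of the original module):
# assuming 'myvfs' is some vfs instance and 'mystate' a file name of our
# choosing, a round-trip through the class above might look like:
#
#   kvfile = simplekeyvaluefile(myvfs, b'mystate')
#   kvfile.write({b'version': b'1'}, firstline=b'v1')
#   data = kvfile.read(firstlinenonkeyval=True)
#   # data == {b'__firstline': b'v1', b'version': b'1'}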


_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
        fetch at each revision. If any of the match elements is None, it
        matches all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist,
            # so don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()
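
# Usage sketch (an illustration, not part of the original module): an
# extension that can fetch file data lazily could register a prefetch
# function here; the extension name and function below are hypothetical.
#
#   def _prefetch(repo, revmatches):
#       for rev, match in revmatches:
#           pass  # fetch the file revisions selected by 'match' at 'rev'
#
#   fileprefetchhooks.add(b'myext', _prefetch)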

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead.
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally, but the term has not
                # been exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))


def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(filtername)
    # possible improvement: we could restrict the check to the affected
    # branches
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally
    loaded.
    """
    return sink
2185 def unhidehashlikerevs(repo, specs, hiddentype):
2185 def unhidehashlikerevs(repo, specs, hiddentype):
2186 """parse the user specs and unhide changesets whose hash or revision number
2186 """parse the user specs and unhide changesets whose hash or revision number
2187 is passed.
2187 is passed.
2188
2188
2189 hiddentype can be: 1) 'warn': warn while unhiding changesets
2189 hiddentype can be: 1) 'warn': warn while unhiding changesets
2190 2) 'nowarn': don't warn while unhiding changesets
2190 2) 'nowarn': don't warn while unhiding changesets
2191
2191
2192 returns a repo object with the required changesets unhidden
2192 returns a repo object with the required changesets unhidden
2193 """
2193 """
2194 if not repo.filtername or not repo.ui.configbool(
2194 if not repo.filtername or not repo.ui.configbool(
2195 b'experimental', b'directaccess'
2195 b'experimental', b'directaccess'
2196 ):
2196 ):
2197 return repo
2197 return repo
2198
2198
2199 if repo.filtername not in (b'visible', b'visible-hidden'):
2199 if repo.filtername not in (b'visible', b'visible-hidden'):
2200 return repo
2200 return repo
2201
2201
2202 symbols = set()
2202 symbols = set()
2203 for spec in specs:
2203 for spec in specs:
2204 try:
2204 try:
2205 tree = revsetlang.parse(spec)
2205 tree = revsetlang.parse(spec)
2206 except error.ParseError: # will be reported by scmutil.revrange()
2206 except error.ParseError: # will be reported by scmutil.revrange()
2207 continue
2207 continue
2208
2208
2209 symbols.update(revsetlang.gethashlikesymbols(tree))
2209 symbols.update(revsetlang.gethashlikesymbols(tree))
2210
2210
2211 if not symbols:
2211 if not symbols:
2212 return repo
2212 return repo
2213
2213
2214 revs = _getrevsfromsymbols(repo, symbols)
2214 revs = _getrevsfromsymbols(repo, symbols)
2215
2215
2216 if not revs:
2216 if not revs:
2217 return repo
2217 return repo
2218
2218
2219 if hiddentype == b'warn':
2219 if hiddentype == b'warn':
2220 unfi = repo.unfiltered()
2220 unfi = repo.unfiltered()
2221 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2221 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2222 repo.ui.warn(
2222 repo.ui.warn(
2223 _(
2223 _(
2224 b"warning: accessing hidden changesets for write "
2224 b"warning: accessing hidden changesets for write "
2225 b"operation: %s\n"
2225 b"operation: %s\n"
2226 )
2226 )
2227 % revstr
2227 % revstr
2228 )
2228 )
2229
2229
2230 # we have to use new filtername to separate branch/tags cache until we can
2230 # we have to use new filtername to separate branch/tags cache until we can
2231 # disbale these cache when revisions are dynamically pinned.
2231 # disbale these cache when revisions are dynamically pinned.
2232 return repo.filtered(b'visible-hidden', revs)
2232 return repo.filtered(b'visible-hidden', revs)


def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark

    If the bookmarked revision isn't a head, an empty set will be returned.
    """
    return repo.revs(format_bookmark_revspec(mark))


def format_bookmark_revspec(mark):
    """Build a revset expression to select revisions reachable by a given
    bookmark"""
    mark = b'literal:' + mark
    return revsetlang.formatspec(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
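
# For illustration (a hypothetical bookmark named 'feature'; not part of the
# original module), the expression built above expands to roughly:
#
#   ancestors(bookmark("literal:feature"))
#   - ancestors(head() and not bookmark("literal:feature"))
#   - ancestors(bookmark() and not bookmark("literal:feature"))
#
# i.e. everything reachable from the bookmark, minus anything also reachable
# from other heads or from other bookmarks.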
@@ -1,174 +1,174 b''
  $ hg init rep; cd rep

  $ touch empty-file
  $ "$PYTHON" -c 'for x in range(10000): print(x)' > large-file

  $ hg addremove
  adding empty-file
  adding large-file

  $ hg commit -m A

  $ rm large-file empty-file
  $ "$PYTHON" -c 'for x in range(10,10000): print(x)' > another-file

  $ hg addremove -s50
  adding another-file
  removing empty-file
  removing large-file
  recording removal of large-file as rename to another-file (99% similar)

  $ hg commit -m B

comparing two empty files caused ZeroDivisionError in the past

  $ hg update -C 0
  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ rm empty-file
  $ touch another-empty-file
  $ hg addremove -s50
  adding another-empty-file
  removing empty-file

  $ cd ..

  $ hg init rep2; cd rep2

  $ "$PYTHON" -c 'for x in range(10000): print(x)' > large-file
  $ "$PYTHON" -c 'for x in range(50): print(x)' > tiny-file

  $ hg addremove
  adding large-file
  adding tiny-file

  $ hg commit -m A

  $ "$PYTHON" -c 'for x in range(70): print(x)' > small-file
  $ rm tiny-file
  $ rm large-file

  $ hg addremove -s50
  removing large-file
  adding small-file
  removing tiny-file
  recording removal of tiny-file as rename to small-file (82% similar)

  $ hg commit -m B

should be sorted by path for stable result

  $ for i in `"$PYTHON" $TESTDIR/seq.py 0 9`; do
  > cp small-file $i
  > done
  $ rm small-file
  $ hg addremove
  adding 0
  adding 1
  adding 2
  adding 3
  adding 4
  adding 5
  adding 6
  adding 7
  adding 8
  adding 9
  removing small-file
  recording removal of small-file as rename to 0 (100% similar)
  recording removal of small-file as rename to 1 (100% similar)
  recording removal of small-file as rename to 2 (100% similar)
  recording removal of small-file as rename to 3 (100% similar)
  recording removal of small-file as rename to 4 (100% similar)
  recording removal of small-file as rename to 5 (100% similar)
  recording removal of small-file as rename to 6 (100% similar)
  recording removal of small-file as rename to 7 (100% similar)
  recording removal of small-file as rename to 8 (100% similar)
  recording removal of small-file as rename to 9 (100% similar)
  $ hg commit -m '10 same files'

pick one from many identical files

  $ cp 0 a
  $ rm `"$PYTHON" $TESTDIR/seq.py 0 9`
  $ hg addremove
  removing 0
  removing 1
  removing 2
  removing 3
  removing 4
  removing 5
  removing 6
  removing 7
  removing 8
  removing 9
  adding a
  recording removal of 0 as rename to a (100% similar)
  $ hg revert -aq

pick one from many similar files

  $ cp 0 a
  $ for i in `"$PYTHON" $TESTDIR/seq.py 0 9`; do
  > echo $i >> $i
  > done
  $ hg commit -m 'make them slightly different'
  $ rm `"$PYTHON" $TESTDIR/seq.py 0 9`
  $ hg addremove -s50
  removing 0
  removing 1
  removing 2
  removing 3
  removing 4
  removing 5
  removing 6
  removing 7
  removing 8
  removing 9
  adding a
  recording removal of 0 as rename to a (99% similar)
  $ hg commit -m 'always the same file should be selected'

should all fail

  $ hg addremove -s foo
  abort: similarity must be a number
  [10]
  $ hg addremove -s -1
  abort: similarity must be between 0 and 100
  [10]
  $ hg addremove -s 1e6
  abort: similarity must be between 0 and 100
  [10]

  $ cd ..

Issue1527: repeated addremove causes Abort

  $ hg init rep3; cd rep3
  $ mkdir d
  $ echo a > d/a
  $ hg add d/a
  $ hg commit -m 1

  $ mv d/a d/b
  $ hg addremove -s80
  removing d/a
  adding d/b
  recording removal of d/a as rename to d/b (100% similar)
  $ hg debugstate
  r 0 0 1970-01-01 00:00:00 d/a
  a 0 -1 unset d/b
  copy: d/a -> d/b
  $ mv d/b c

no copies found here (since the target isn't in d)

  $ hg addremove -s80 d
  removing d/b

copies here

  $ hg addremove -s80
  adding c
  recording removal of d/a as rename to c (100% similar)

  $ cd ..