##// END OF EJS Templates
errors: use detailed exit code for detected case-collision...
Martin von Zweigbergk -
r49190:0b8e076e default
parent child Browse files
Show More
@@ -1,2293 +1,2293
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Olivia Mackall <olivia@selenic.com>
3 # Copyright Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import os
12 import os
13 import posixpath
13 import posixpath
14 import re
14 import re
15 import subprocess
15 import subprocess
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirrev,
24 wdirrev,
25 )
25 )
26 from .pycompat import getattr
26 from .pycompat import getattr
27 from .thirdparty import attr
27 from .thirdparty import attr
28 from . import (
28 from . import (
29 copies as copiesmod,
29 copies as copiesmod,
30 encoding,
30 encoding,
31 error,
31 error,
32 match as matchmod,
32 match as matchmod,
33 obsolete,
33 obsolete,
34 obsutil,
34 obsutil,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 policy,
37 policy,
38 pycompat,
38 pycompat,
39 requirements as requirementsmod,
39 requirements as requirementsmod,
40 revsetlang,
40 revsetlang,
41 similar,
41 similar,
42 smartset,
42 smartset,
43 url,
43 url,
44 util,
44 util,
45 vfs,
45 vfs,
46 )
46 )
47
47
48 from .utils import (
48 from .utils import (
49 hashutil,
49 hashutil,
50 procutil,
50 procutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
# Select the platform-specific scm helpers: the Windows and POSIX
# variants expose the same interface (e.g. termsize, used below).
if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

# Load the 'parsers' module according to the module policy (C extension
# or pure-Python fallback, as configured by mercurial.policy).
parsers = policy.importmod('parsers')
# Rust revlog implementation; NOTE(review): presumably None when the
# Rust extensions are not built -- confirm against policy.importrust.
rustrevlog = policy.importrust('revlog')

# Re-export the platform's terminal-size helper at module level.
termsize = scmplatform.termsize
63
63
64
64
@attr.s(slots=True, repr=False)
class status(object):
    """Struct holding one list of file names per dirstate status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        # Iteration order matches the attribute declaration order above.
        for filelist in (
            self.modified,
            self.added,
            self.removed,
            self.deleted,
            self.unknown,
            self.ignored,
            self.clean,
        ):
            yield filelist

    def __repr__(self):
        template = (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        )
        fields = tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
        return template % fields
95
95
96
96
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subrepo path to the context it should be read from,
    # preferring ctx1.  The paths from ctx2 matter when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    bycontext = dict.fromkeys(ctx2.substate, ctx2)
    bycontext.update(dict.fromkeys(ctx1.substate, ctx1))

    onlyin2 = set()
    for path in ctx2.substate:
        if path not in ctx1.substate:
            del bycontext[path]
            onlyin2.add(path)

    for path, ctx in sorted(pycompat.iteritems(bycontext)):
        yield path, ctx.sub(path)

    # For anything only in ctx2, yield an empty subrepo based on ctx1 so
    # that status and diff get an accurate result when they run
    # 'sub.{status|diff}(rev2)'.  Otherwise the ctx2 subrepo would be
    # compared against itself.
    for path in onlyin2:
        yield path, ctx2.nullsub(path, ctx1)
121
121
122
122
def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    # Count non-extinct secret changesets among the excluded nodes so the
    # user learns why "nothing" happened.
    secret = []
    for node in excluded or []:
        ctx = repo[node]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secret.append(node)

    if not secret:
        ui.status(_(b"no changes found\n"))
        return
    ui.status(
        _(b"no changes found (ignored %d secret changesets)\n")
        % len(secret)
    )
141
141
142
142
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    # -1 means "no exit code was determined"; the Error handler below may
    # override either value from the exception itself.
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            # Print the traceback (when --traceback/ui.traceback is set)
            # before the outer handlers consume the exception.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.RepoError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        # Normalize a unicode payload to bytes before formatting.
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if msg is None:
            ui.error(b"\n")
        elif not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Error as inst:
        # Generic Mercurial errors may carry their own exit codes; honor
        # them when present.
        if inst.detailed_exit_code is not None:
            detailed_exit_code = inst.detailed_exit_code
        if inst.coarse_exit_code is not None:
            coarse_exit_code = inst.coarse_exit_code
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        # The last word of the message is the module that failed to load;
        # recognize Mercurial's own C extensions and suggest a fix.
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        # A broken pipe (e.g. output piped to `head`) is not an error
        # worth reporting.
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    # Which of the two computed codes is returned depends on the user's
    # ui.detailed-exit-code configuration.
    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code
265
265
266
266
def checknewlabel(repo, lbl, kind):
    """Raise InputError if lbl is not acceptable as a new label name.

    Do not use the "kind" parameter in ui output; it makes strings
    difficult to translate.
    """
    if lbl in (b'tip', b'.', b'null'):
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for forbidden in (b':', b'\0', b'\n', b'\r'):
        if forbidden in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(forbidden)
            )
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise error.InputError(_(b"cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )
286
286
287
287
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(banned in f for banned in (b'\r', b'\n')):
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )
295
295
296
296
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    problem = util.checkwinfilename(f)
    if not problem:
        return
    problem = b"%s: %s" % (problem, procutil.shellquote(f))
    if abort:
        raise error.InputError(problem)
    ui.warn(_(b"warning: %s\n") % problem)
308
308
309
309
def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    raw = ui.config(b'ui', b'portablefilenames')
    lowered = raw.lower()
    asbool = stringutil.parsebool(raw)
    # On Windows, non-portable names always abort; elsewhere they abort
    # only when explicitly configured to.
    abort = pycompat.iswindows or lowered == b'abort'
    warn = asbool or lowered == b'warn'
    if asbool is None and not (warn or abort or lowered == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % raw
        )
    return abort, warn
323
323
324
324
class casecollisionauditor(object):
    """Detect case-folding collisions between new files and the dirstate."""

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        joined = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(joined).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.StateError(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
348
348
349
349
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = cl._filteredrevs_hashcache.get(maxrev)
    if not key:
        filtered = sorted(r for r in cl.filteredrevs if r <= maxrev)
        if filtered:
            hasher = hashutil.sha1()
            for rev in filtered:
                hasher.update(b'%d;' % rev)
            key = hasher.digest()
        # Memoize the result (possibly still falsy) for this maxrev.
        cl._filteredrevs_hashcache[maxrev] = key
    return key
375
375
376
376
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def raise_on_root(err):
        # Errors on the starting directory itself are fatal; errors on
        # subdirectories are silently skipped by os.walk.
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            # Record dirname's stat in dirlst; return True if it was new,
            # i.e. not the same inode as any directory already seen.
            dirstat = os.stat(dirname)
            if any(samestat(dirstat, seen) for seen in dirlst):
                return False
            dirlst.append(dirstat)
            return True

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=raise_on_root):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            # Follow symlinked directories via a recursive walk, keeping
            # only never-seen plain directories for os.walk to descend into.
            keep = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        keep.append(d)
            dirs[:] = keep
424
424
425
425
def binnode(ctx):
    """Return binary node id for a given basectx"""
    n = ctx.node()
    if n is not None:
        return n
    # The working directory has no real node; use the sentinel wdir id.
    return ctx.repo().nodeconstants.wdirid
432
432
433
433
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # A None revision means the working directory; map it to wdirrev.
    return wdirrev if rev is None else rev
441
441
442
442
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
448
448
449
449
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Debug mode shows the full hash; otherwise use the short form.
    hexfunc = hex if ui.debugflag else short
    return b'%d:%s' % (rev, hexfunc(node))
457
457
458
458
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly 'x'-prefixed) hex nodeid prefix to a binary node.

    Returns None when nothing matches.  Re-raises
    AmbiguousPrefixLookupError when the prefix stays ambiguous even after
    trying the experimental.revisions.disambiguatewithin revset.
    """
    # A leading 'x' marks an explicit hex prefix (see mayberevnum /
    # shortesthexnodeidprefix); strip it before matching.
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # Only a unique match within the revset disambiguates;
                # otherwise fall through and re-raise.
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node
489
489
490
490
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        value = int(prefix)
    except ValueError:
        return False
    # A pure int starting with zero is never read as a revnum (except
    # '0' itself, which *is* a valid revnum), nor is anything beyond the
    # tip revision.
    if prefix != b'0' and prefix[0:1] == b'0':
        return False
    return value < len(repo)
504
504
505
505
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            # with prefixhexnode enabled, mark revnum-looking prefixes with
            # a leading 'x' instead of lengthening them
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        # otherwise, grow the prefix until it can no longer be mistaken
        # for a revision number
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        # only disambiguate within the configured subset of revisions
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapped need to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                # fast path: the native nodetree knows the shortest length
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # slow path: linear scan of the subset for each candidate length
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
578
578
579
579
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
591
591
592
592
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        # well-known symbolic names go straight to the repo
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        # try to interpret the symbol as a revision number
        try:
            r = int(symbol)
            # reject forms like b'010' that don't round-trip through int()
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # try to interpret the symbol as a full hex nodeid
        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # finally, try the symbol as a unique hex nodeid prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        # the working directory pseudo-revision
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        # the symbol exists but is filtered out; raise a friendlier error
        raise _filterederror(repo, symbol)
658
658
659
659
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith(b'visible'):
        # generic message for non-visibility filters
        msg = _(b"filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # The changeset is hidden; look it up in the unfiltered repo to find
    # out whether obsolescence is the reason it is not visible.
    unfilteredrepo = repo.unfiltered()
    ctx = revsymbol(unfilteredrepo, changeid)

    if ctx.obsolete():
        # enrich the message with the reason that made this changeset
        # not visible
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _(b"hidden revision '%s'") % changeid

    hint = _(b'use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
684
684
685
685
def revsingle(repo, revspec, default=b'.', localalias=None):
    """Resolve ``revspec`` to a single changectx, or ``default`` when empty.

    An integer 0 revspec is a valid revision and is not treated as empty.
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.InputError(_(b'empty revision set'))
    return repo[matched.last()]
694
694
695
695
def _pairspec(revspec):
    """Report whether ``revspec`` parses to a top-level range expression."""
    rangekinds = (b'range', b'rangepre', b'rangepost', b'rangeall')
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in rangekinds
704
704
705
705
def revpair(repo, revs):
    """Resolve ``revs`` to a (first, second) pair of contexts.

    With no revs, returns the working copy parent and the working copy.
    """
    if not revs:
        return repo[b'.'], repo[None]

    resolved = revrange(repo, revs)

    if not resolved:
        raise error.InputError(_(b'empty revision range'))

    first = resolved.first()
    second = resolved.last()

    # detect ranges where one side resolved to nothing (e.g. "x::")
    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.InputError(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
730
730
731
731
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # normalize bare revision numbers into revset expressions
    allspecs = [
        revsetlang.formatspec(b'%d', spec) if isinstance(spec, int) else spec
        for spec in specs
    ]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
759
759
760
760
def increasingwindows(windowsize=8, sizelimit=512):
    """Generate an endless stream of window sizes.

    Sizes start at ``windowsize`` and double after each yield until the
    yielded value reaches ``sizelimit``, after which the same value is
    yielded forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size = size * 2
766
766
767
767
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        revit = iter(revs)
        exhausted = False
        for windowsize in increasingwindows():
            # pull up to windowsize revs off the iterator
            window = []
            for _unused in pycompat.xrange(windowsize):
                rev = next(revit, None)
                if rev is None:
                    exhausted = True
                    break
                window.append(rev)
            # gather data in ascending order, then yield in the revs order
            for rev in sorted(window):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in window:
                yield change(rev)

            if exhausted:
                break

    return iterate()
808
808
809
809
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a real merge: both parents are meaningful
        return parents
    if repo.ui.debugflag:
        # in debug mode always show both parents, padding with nullrev
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        # linear history: the parent is implied by the ordering
        return []
    return parents
825
825
826
826
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)

    # repo-relative output: optionally forcing forward slashes
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    return util.localpath
865
865
866
866
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''

    def prefixed(f):
        return uipathfn(posixpath.join(subpath, f))

    return prefixed
870
870
871
871
def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    if pats:
        return True
    return bool(opts.get(b'include') or opts.get(b'exclude'))
879
879
880
880
def expandpats(pats):
    """Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh."""
    if not util.expandglobs:
        return list(pats)

    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # bare pattern: let the filesystem expand it
            try:
                found = glob.glob(pat)
            except re.error:
                found = [pat]
            if found:
                expanded.extend(found)
                continue
        # explicit kind, or a glob that matched nothing: keep verbatim
        expanded.append(kindpat)
    return expanded
899
899
900
900
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def warnbad(f, msg):
        # default bad-file callback: warn with a user-presentable path
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = warnbad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    # an always-matcher means the patterns were effectively unused
    if m.always():
        pats = []
    return m, pats
932
932
933
933
def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
939
939
940
940
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # the repo argument is unused; it is kept for signature symmetry with
    # the other match* helpers in this module
    return matchmod.always()
944
944
945
945
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # exact matcher: no pattern expansion, only literal file paths
    return matchmod.exact(files, badfn=badfn)
949
949
950
950
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # a plain path: just normalize it against the repo root
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)

    # a real pattern: it must match exactly one file in the revision
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
964
964
965
965
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if origbackuppath:
        return vfs.vfs(repo.wvfs.join(origbackuppath))
    return None
974
974
975
975
def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # no dedicated backup directory configured: back up next to the file
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

    # Remove any files that conflict with the backup file's path
    for f in reversed(list(pathutil.finddirs(filepath))):
        if origvfs.isfileorlink(f):
            ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
            origvfs.unlink(f)
            # only the shallowest conflicting ancestor needs removal
            break

    origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        # a directory occupies the backup path itself; clear it out
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
1010
1010
1011
1011
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        # translate nodes to revs via the changelog, then test membership
        # against the rev container
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))
1021
1021
1022
1022
1023 def cleanupnodes(
1023 def cleanupnodes(
1024 repo,
1024 repo,
1025 replacements,
1025 replacements,
1026 operation,
1026 operation,
1027 moves=None,
1027 moves=None,
1028 metadata=None,
1028 metadata=None,
1029 fixphase=False,
1029 fixphase=False,
1030 targetphase=None,
1030 targetphase=None,
1031 backup=True,
1031 backup=True,
1032 ):
1032 ):
1033 """do common cleanups when old nodes are replaced by new nodes
1033 """do common cleanups when old nodes are replaced by new nodes
1034
1034
1035 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1035 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1036 (we might also want to move working directory parent in the future)
1036 (we might also want to move working directory parent in the future)
1037
1037
1038 By default, bookmark moves are calculated automatically from 'replacements',
1038 By default, bookmark moves are calculated automatically from 'replacements',
1039 but 'moves' can be used to override that. Also, 'moves' may include
1039 but 'moves' can be used to override that. Also, 'moves' may include
1040 additional bookmark moves that should not have associated obsmarkers.
1040 additional bookmark moves that should not have associated obsmarkers.
1041
1041
1042 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
1042 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
1043 have replacements. operation is a string, like "rebase".
1043 have replacements. operation is a string, like "rebase".
1044
1044
1045 metadata is dictionary containing metadata to be stored in obsmarker if
1045 metadata is dictionary containing metadata to be stored in obsmarker if
1046 obsolescence is enabled.
1046 obsolescence is enabled.
1047 """
1047 """
1048 assert fixphase or targetphase is None
1048 assert fixphase or targetphase is None
1049 if not replacements and not moves:
1049 if not replacements and not moves:
1050 return
1050 return
1051
1051
1052 # translate mapping's other forms
1052 # translate mapping's other forms
1053 if not util.safehasattr(replacements, b'items'):
1053 if not util.safehasattr(replacements, b'items'):
1054 replacements = {(n,): () for n in replacements}
1054 replacements = {(n,): () for n in replacements}
1055 else:
1055 else:
1056 # upgrading non tuple "source" to tuple ones for BC
1056 # upgrading non tuple "source" to tuple ones for BC
1057 repls = {}
1057 repls = {}
1058 for key, value in replacements.items():
1058 for key, value in replacements.items():
1059 if not isinstance(key, tuple):
1059 if not isinstance(key, tuple):
1060 key = (key,)
1060 key = (key,)
1061 repls[key] = value
1061 repls[key] = value
1062 replacements = repls
1062 replacements = repls
1063
1063
1064 # Unfiltered repo is needed since nodes in replacements might be hidden.
1064 # Unfiltered repo is needed since nodes in replacements might be hidden.
1065 unfi = repo.unfiltered()
1065 unfi = repo.unfiltered()
1066
1066
1067 # Calculate bookmark movements
1067 # Calculate bookmark movements
1068 if moves is None:
1068 if moves is None:
1069 moves = {}
1069 moves = {}
1070 for oldnodes, newnodes in replacements.items():
1070 for oldnodes, newnodes in replacements.items():
1071 for oldnode in oldnodes:
1071 for oldnode in oldnodes:
1072 if oldnode in moves:
1072 if oldnode in moves:
1073 continue
1073 continue
1074 if len(newnodes) > 1:
1074 if len(newnodes) > 1:
1075 # usually a split, take the one with biggest rev number
1075 # usually a split, take the one with biggest rev number
1076 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1076 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1077 elif len(newnodes) == 0:
1077 elif len(newnodes) == 0:
1078 # move bookmark backwards
1078 # move bookmark backwards
1079 allreplaced = []
1079 allreplaced = []
1080 for rep in replacements:
1080 for rep in replacements:
1081 allreplaced.extend(rep)
1081 allreplaced.extend(rep)
1082 roots = list(
1082 roots = list(
1083 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1083 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1084 )
1084 )
1085 if roots:
1085 if roots:
1086 newnode = roots[0].node()
1086 newnode = roots[0].node()
1087 else:
1087 else:
1088 newnode = repo.nullid
1088 newnode = repo.nullid
1089 else:
1089 else:
1090 newnode = newnodes[0]
1090 newnode = newnodes[0]
1091 moves[oldnode] = newnode
1091 moves[oldnode] = newnode
1092
1092
1093 allnewnodes = [n for ns in replacements.values() for n in ns]
1093 allnewnodes = [n for ns in replacements.values() for n in ns]
1094 toretract = {}
1094 toretract = {}
1095 toadvance = {}
1095 toadvance = {}
1096 if fixphase:
1096 if fixphase:
1097 precursors = {}
1097 precursors = {}
1098 for oldnodes, newnodes in replacements.items():
1098 for oldnodes, newnodes in replacements.items():
1099 for oldnode in oldnodes:
1099 for oldnode in oldnodes:
1100 for newnode in newnodes:
1100 for newnode in newnodes:
1101 precursors.setdefault(newnode, []).append(oldnode)
1101 precursors.setdefault(newnode, []).append(oldnode)
1102
1102
1103 allnewnodes.sort(key=lambda n: unfi[n].rev())
1103 allnewnodes.sort(key=lambda n: unfi[n].rev())
1104 newphases = {}
1104 newphases = {}
1105
1105
1106 def phase(ctx):
1106 def phase(ctx):
1107 return newphases.get(ctx.node(), ctx.phase())
1107 return newphases.get(ctx.node(), ctx.phase())
1108
1108
1109 for newnode in allnewnodes:
1109 for newnode in allnewnodes:
1110 ctx = unfi[newnode]
1110 ctx = unfi[newnode]
1111 parentphase = max(phase(p) for p in ctx.parents())
1111 parentphase = max(phase(p) for p in ctx.parents())
1112 if targetphase is None:
1112 if targetphase is None:
1113 oldphase = max(
1113 oldphase = max(
1114 unfi[oldnode].phase() for oldnode in precursors[newnode]
1114 unfi[oldnode].phase() for oldnode in precursors[newnode]
1115 )
1115 )
1116 newphase = max(oldphase, parentphase)
1116 newphase = max(oldphase, parentphase)
1117 else:
1117 else:
1118 newphase = max(targetphase, parentphase)
1118 newphase = max(targetphase, parentphase)
1119 newphases[newnode] = newphase
1119 newphases[newnode] = newphase
1120 if newphase > ctx.phase():
1120 if newphase > ctx.phase():
1121 toretract.setdefault(newphase, []).append(newnode)
1121 toretract.setdefault(newphase, []).append(newnode)
1122 elif newphase < ctx.phase():
1122 elif newphase < ctx.phase():
1123 toadvance.setdefault(newphase, []).append(newnode)
1123 toadvance.setdefault(newphase, []).append(newnode)
1124
1124
1125 with repo.transaction(b'cleanup') as tr:
1125 with repo.transaction(b'cleanup') as tr:
1126 # Move bookmarks
1126 # Move bookmarks
1127 bmarks = repo._bookmarks
1127 bmarks = repo._bookmarks
1128 bmarkchanges = []
1128 bmarkchanges = []
1129 for oldnode, newnode in moves.items():
1129 for oldnode, newnode in moves.items():
1130 oldbmarks = repo.nodebookmarks(oldnode)
1130 oldbmarks = repo.nodebookmarks(oldnode)
1131 if not oldbmarks:
1131 if not oldbmarks:
1132 continue
1132 continue
1133 from . import bookmarks # avoid import cycle
1133 from . import bookmarks # avoid import cycle
1134
1134
1135 repo.ui.debug(
1135 repo.ui.debug(
1136 b'moving bookmarks %r from %s to %s\n'
1136 b'moving bookmarks %r from %s to %s\n'
1137 % (
1137 % (
1138 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1138 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1139 hex(oldnode),
1139 hex(oldnode),
1140 hex(newnode),
1140 hex(newnode),
1141 )
1141 )
1142 )
1142 )
1143 # Delete divergent bookmarks being parents of related newnodes
1143 # Delete divergent bookmarks being parents of related newnodes
1144 deleterevs = repo.revs(
1144 deleterevs = repo.revs(
1145 b'parents(roots(%ln & (::%n))) - parents(%n)',
1145 b'parents(roots(%ln & (::%n))) - parents(%n)',
1146 allnewnodes,
1146 allnewnodes,
1147 newnode,
1147 newnode,
1148 oldnode,
1148 oldnode,
1149 )
1149 )
1150 deletenodes = _containsnode(repo, deleterevs)
1150 deletenodes = _containsnode(repo, deleterevs)
1151 for name in oldbmarks:
1151 for name in oldbmarks:
1152 bmarkchanges.append((name, newnode))
1152 bmarkchanges.append((name, newnode))
1153 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1153 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1154 bmarkchanges.append((b, None))
1154 bmarkchanges.append((b, None))
1155
1155
1156 if bmarkchanges:
1156 if bmarkchanges:
1157 bmarks.applychanges(repo, tr, bmarkchanges)
1157 bmarks.applychanges(repo, tr, bmarkchanges)
1158
1158
1159 for phase, nodes in toretract.items():
1159 for phase, nodes in toretract.items():
1160 phases.retractboundary(repo, tr, phase, nodes)
1160 phases.retractboundary(repo, tr, phase, nodes)
1161 for phase, nodes in toadvance.items():
1161 for phase, nodes in toadvance.items():
1162 phases.advanceboundary(repo, tr, phase, nodes)
1162 phases.advanceboundary(repo, tr, phase, nodes)
1163
1163
1164 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1164 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1165 # Obsolete or strip nodes
1165 # Obsolete or strip nodes
1166 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1166 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1167 # If a node is already obsoleted, and we want to obsolete it
1167 # If a node is already obsoleted, and we want to obsolete it
1168 # without a successor, skip that obssolete request since it's
1168 # without a successor, skip that obssolete request since it's
1169 # unnecessary. That's the "if s or not isobs(n)" check below.
1169 # unnecessary. That's the "if s or not isobs(n)" check below.
1170 # Also sort the node in topology order, that might be useful for
1170 # Also sort the node in topology order, that might be useful for
1171 # some obsstore logic.
1171 # some obsstore logic.
1172 # NOTE: the sorting might belong to createmarkers.
1172 # NOTE: the sorting might belong to createmarkers.
1173 torev = unfi.changelog.rev
1173 torev = unfi.changelog.rev
1174 sortfunc = lambda ns: torev(ns[0][0])
1174 sortfunc = lambda ns: torev(ns[0][0])
1175 rels = []
1175 rels = []
1176 for ns, s in sorted(replacements.items(), key=sortfunc):
1176 for ns, s in sorted(replacements.items(), key=sortfunc):
1177 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1177 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1178 rels.append(rel)
1178 rels.append(rel)
1179 if rels:
1179 if rels:
1180 obsolete.createmarkers(
1180 obsolete.createmarkers(
1181 repo, rels, operation=operation, metadata=metadata
1181 repo, rels, operation=operation, metadata=metadata
1182 )
1182 )
1183 elif phases.supportinternal(repo) and mayusearchived:
1183 elif phases.supportinternal(repo) and mayusearchived:
1184 # this assume we do not have "unstable" nodes above the cleaned ones
1184 # this assume we do not have "unstable" nodes above the cleaned ones
1185 allreplaced = set()
1185 allreplaced = set()
1186 for ns in replacements.keys():
1186 for ns in replacements.keys():
1187 allreplaced.update(ns)
1187 allreplaced.update(ns)
1188 if backup:
1188 if backup:
1189 from . import repair # avoid import cycle
1189 from . import repair # avoid import cycle
1190
1190
1191 node = min(allreplaced, key=repo.changelog.rev)
1191 node = min(allreplaced, key=repo.changelog.rev)
1192 repair.backupbundle(
1192 repair.backupbundle(
1193 repo, allreplaced, allreplaced, node, operation
1193 repo, allreplaced, allreplaced, node, operation
1194 )
1194 )
1195 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1195 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1196 else:
1196 else:
1197 from . import repair # avoid import cycle
1197 from . import repair # avoid import cycle
1198
1198
1199 tostrip = list(n for ns in replacements for n in ns)
1199 tostrip = list(n for ns in replacements for n in ns)
1200 if tostrip:
1200 if tostrip:
1201 repair.delayedstrip(
1201 repair.delayedstrip(
1202 repo.ui, repo, tostrip, operation, backup=backup
1202 repo.ui, repo, tostrip, operation, backup=backup
1203 )
1203 )
1204
1204
1205
1205
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    """Add new files and forget missing ones, like ``hg addremove``.

    ``matcher`` selects the paths to consider, ``prefix``/``uipathfn`` shape
    the paths shown to the user, and ``opts`` may carry ``dry_run``,
    ``similarity`` and ``subrepos`` flags.

    Returns 1 when any explicitly requested path was rejected or a
    subrepository reported a failure, 0 otherwise.

    Raises error.InputError when ``similarity`` is not a number in [0, 100].
    """
    opts = {} if opts is None else opts
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.InputError(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.InputError(_(b'similarity must be between 0 and 100'))
    # findrenames() expects a ratio in [0, 1].
    similarity /= 100.0

    ret = 0
    wctx = repo[None]

    # Recurse into subrepositories the matcher (or --subrepos) selects.
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        # Only report paths the user named explicitly; remember every
        # failing path so the exit status can reflect it below.
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    # Tell the user which files will be added and which removed.
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # An explicitly requested path that failed forces a non-zero result.
    if any(f in m.files() for f in rejected):
        return 1
    return ret
1273
1273
1274
1274
def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    # The badfn lambda closes over this list; any path the matcher rejects
    # ends up here and turns the result into 1.
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            status = (
                _(b'adding %s\n') if abs in unknownset else _(b'removing %s\n')
            ) % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return 0
1308
1308
1309
1309
def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists: (added, unknown, deleted, removed, forgotten).
    """
    added = []
    unknown = []
    deleted = []
    removed = []
    forgotten = []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for fname, st in pycompat.iteritems(walkresults):
        entry = dirstate.get_entry(fname)
        if (not entry.any_tracked) and audit_path.check(fname):
            # never tracked and the path passes the auditor: candidate add
            unknown.append(fname)
        elif (not entry.removed) and not st:
            # tracked but missing from disk
            deleted.append(fname)
        elif entry.removed and st:
            # marked removed yet present on disk again
            forgotten.append(fname)
        elif entry.removed and not st:
            # properly removed; kept for finding renames
            removed.append(fname)
        elif entry.added:
            added.append(fname)

    return added, unknown, deleted, removed, forgotten
1344
1344
1345
1345
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    """Find renames from removed files to added ones.

    Returns a mapping of {new_path: old_path}.  An empty dict is returned
    when ``similarity`` is not positive (rename detection disabled).
    """
    renames = {}
    if not similarity > 0:
        return renames
    for old, new, score in similar.findrenames(
        repo, added, removed, similarity
    ):
        # Stay quiet about exact matches unless the user asked for verbosity.
        if (
            repo.ui.verbose
            or not matcher.exact(old)
            or not matcher.exact(new)
        ):
            repo.ui.status(
                _(
                    b'recording removal of %s as rename to %s '
                    b'(%d%% similar)\n'
                )
                % (uipathfn(old), uipathfn(new), score * 100)
            )
        renames[new] = old
    return renames
1367
1367
1368
1368
def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    wctx = repo[None]
    # All dirstate mutations happen under the working-copy lock.
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        # renames maps destination -> source (see _findrenames).
        for dst, src in pycompat.iteritems(renames):
            wctx.copy(src, dst)
1378
1378
1379
1379
def getrenamedfn(repo, endrev=None):
    """Return a callable ``getrenamed(fn, rev)`` reporting copy sources.

    The callable returns the path that ``fn`` was copied/renamed from in
    changeset ``rev``, or None when no copy was recorded there.
    """
    if copiesmod.usechangesetcentricalgo(repo):
        # Copy metadata lives in the changeset itself; no filelog scan needed.
        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            return ctx.p2copies().get(fn)

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            # First query for this file: prefill a per-linkrev cache from
            # the filelog, stopping once endrev is reached.
            cache = rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                cache[lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
1424
1424
1425
1425
def getcopiesfn(repo, endrev=None):
    """Return a callable ``copiesfn(ctx)`` listing copies made in ``ctx``.

    The callable returns a sorted list of (destination, source) pairs.
    """
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            p2copies = ctx.p2copies()
            if not p2copies:
                return sorted(ctx.p1copies().items())
            combined = ctx.p1copies().copy()
            # There should be no overlap between p1 and p2 copies.
            combined.update(p2copies)
            return sorted(combined.items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            rev = ctx.rev()
            found = []
            for fn in ctx.files():
                source = getrenamed(fn, rev)
                if source:
                    found.append((fn, source))
            return found

    return copiesfn
1450
1450
1451
1451
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # Follow an existing copy chain back to its origin.
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        entry = repo.dirstate.get_entry(dst)
        if (entry.added or not entry.tracked) and not dryrun:
            repo.dirstate.set_tracked(dst)
        return

    if repo.dirstate.get_entry(origsrc).added and origsrc == src:
        # The source is only added in the working copy, so there is no
        # committed revision to record copy data against.
        if not ui.quiet:
            ui.warn(
                _(
                    b"%s has not been committed yet, so no copy "
                    b"data will be stored for %s.\n"
                )
                % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
            )
        if not repo.dirstate.get_entry(dst).tracked and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
1475
1475
1476
1476
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    wc_copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    st = newctx.status(oldctx, match=match)

    for path in st.modified:
        ds.update_file_p1(path, p1_tracked=True)

    for path in st.added:
        ds.update_file_p1(path, p1_tracked=False)

    for path in st.removed:
        ds.update_file_p1(path, p1_tracked=True)

    # Merge old parent and old working dir copies
    merged = copiesmod.pathcopies(newctx, oldctx, match)
    merged.update(wc_copies)
    # Collapse chained copies so each destination points at its origin.
    copies = {
        dst: merged.get(src, src)
        for dst, src in pycompat.iteritems(merged)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()
1512
1512
1513
1513
def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT not in requirements:
        # Without share-safe everything belongs in .hg/requires.
        return requirements, None
    wc = set()
    store = set()
    for req in requirements:
        if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
            wc.add(req)
        else:
            store.add(req)
    return wc, store
1531
1531
1532
1532
def istreemanifest(repo):
    """returns whether the repository is using treemanifest or not"""
    reqs = repo.requirements
    return requirementsmod.TREEMANIFEST_REQUIREMENT in reqs
1536
1536
1537
1537
def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements
    """
    if requirements:
        repo.requirements = requirements
    wc_reqs, store_reqs = filterrequirements(repo.requirements)
    if wc_reqs is not None:
        writerequires(repo.vfs, wc_reqs)
    if store_reqs is not None:
        writerequires(repo.svfs, store_reqs)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')
1555
1555
1556
1556
def writerequires(opener, requirements):
    """Write the requirement names, sorted and newline-terminated, to the
    ``requires`` file through ``opener`` (a vfs-style callable)."""
    # atomictemp keeps readers from ever seeing a partially written file.
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for name in sorted(requirements):
            fp.write(b"%s\n" % name)
1561
1561
1562
1562
class filecachesubentry(object):
    """Tracks the stat state of a single file path for cache invalidation.

    ``cachestat`` holds the last observed ``util.cachestat`` result (or None
    when the file was missing or never statted).  ``_cacheable`` is a
    tri-state flag: True/False once determined, None while still unknown.
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            # otherwise leave _cacheable as None: "not known yet"

    def refresh(self):
        # re-stat only when caching this file makes sense at all
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # while undetermined, optimistically assume the file is cacheable
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        """Return True when the tracked file may have changed on disk."""
        # an uncacheable file must always be treated as modified
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # a successful stat lets us settle a previously unknown
        # cacheability state
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # the file may have just been found uncacheable
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """stat() ``path``, returning None when the file does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1617
1617
1618
1618
class filecacheentry(object):
    """Aggregates several ``filecachesubentry`` objects under one cache key."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
1635
1635
1636
1636
class filecache(object):
    """A property-like decorator whose cached value is invalidated by
    changes to files under .hg/.

    On first access the configured files are stat()ed, the decorated
    function is invoked, and the result is stored both in the instance
    ``__dict__`` (under the function's name) and in the owner object's
    ``_filecache`` dict (wrapped in a ``filecacheentry``).

    Subsequent attribute reads are served straight from the instance
    ``__dict__``; no stat() is performed until the cached attribute is
    explicitly invalidated, e.g. by ``delattr(obj, attr)`` (optionally
    also ``del obj._filecache[attr]`` to drop the stat data).

    To assign a value from the outside, call ``__class__.<attr>.set(obj,
    value)`` instead of a plain attribute assignment, so the matching
    ``_filecache`` entry stays in sync.

    External code may pre-populate ``obj._filecache``; such entries are
    honored by the property machinery when present.
    """

    def __init__(self, *paths):
        self.paths = paths

    def tracked_paths(self, obj):
        return [self.join(obj, p) for p in self.paths]

    def join(self, obj, fname):
        """Compute the runtime path of a tracked file.

        Subclasses must override this and call the appropriate join
        function on ``obj`` (an instance of the class whose method was
        decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # class-level access yields the descriptor itself
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if not entry:
            # We stat -before- invoking the function so a concurrent
            # writer cannot make the cache lie about what we computed.
            entry = filecacheentry(self.tracked_paths(obj), True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry
        elif entry.changed():
            entry.obj = self.func(obj)

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # __set__ is deliberately not implemented: a data descriptor would
    # force every attribute read through a (slow) function call.

    def set(self, obj, value):
        if self.name in obj._filecache:
            ce = obj._filecache[self.name]
        else:
            # register a placeholder entry so that presence in __dict__
            # keeps implying presence in _filecache
            ce = filecacheentry(self.tracked_paths(obj), False)
            obj._filecache[self.name] = ce

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
1724
1724
1725
1725
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for line in src:
            # records are "<revspec>[ <value>]"
            if b" " in line:
                key, value = line.strip().split(b" ", 1)
            else:
                key, value = line.strip(), b""

            key = encoding.tolocal(key)
            try:
                data[revsingle(repo, key).rev()] = encoding.tolocal(value)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
1792
1792
1793
1793
class progress(object):
    """Context-manager wrapper that drives a progress-bar callback.

    ``updatebar`` is called as ``updatebar(topic, pos, item, unit, total)``
    on every update; passing ``pos=None`` (via ``complete()``) signals the
    end of the operation.
    """

    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        # progress.debug mirrors every update to the debug output
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # always close out the bar, even on error
        self.complete()

    def update(self, pos, item=b"", total=None):
        """Move the bar to ``pos``, optionally adjusting ``total``."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        """Advance the bar by ``step`` positions."""
        self.update(self.pos + step, item, total)

    def complete(self):
        """Signal completion: reset state and emit a final None position."""
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        # prefix unit/item with a separating space only when present
        unit = b' ' + self.unit if self.unit else b''
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1843
1843
1844
1844
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    if ui.configbool(b'format', b'generaldelta'):
        return True
    # fall back to the usegeneraldelta knob
    return ui.configbool(b'format', b'usegeneraldelta')
1851
1851
1852
1852
def gddeltaconfig(ui):
    """Tell whether incoming deltas should be optimised (general delta)."""
    # experimental config: format.generaldelta
    enabled = ui.configbool(b'format', b'generaldelta')
    return enabled
1857
1857
1858
1858
class simplekeyvaluefile(object):
    """Storage for a flat mapping, serialized as ``key=value`` lines.

    Keys must be alphanumeric and start with a letter; values must not
    contain newline characters.
    """

    # reserved pseudo-key under which a non key=value first line is returned
    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        # ``keys`` is accepted for interface compatibility but unused
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Parse the file and return its contents as a dict.

        When ``firstlinenonkeyval`` is true, the first line is not parsed
        as a key=value pair but returned verbatim (without its trailing
        newline) under the ``__firstline`` key.

        Raises CorruptedState on malformed content.
        """
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            lines.pop(0)

        try:
            # 'line.strip()' (rather than plain truthiness) also skips
            # lines that contain only a '\n'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        # CorruptedState below is never a ValueError, so raising it
        # outside the try block above is equivalent to the historical
        # in-block placement
        if self.firstlinekey in updatedict:
            e = _(b"%r can't be used as a key")
            raise error.CorruptedState(e % self.firstlinekey)
        d.update(updatedict)
        return d

    def write(self, data, firstline=None):
        """Serialize ``data`` (a dict) to the file.

        Keys must be alphanumeric and start with a letter; values must not
        contain newline characters. When ``firstline`` is not None it is
        written verbatim before everything else, not in key=value form.

        Raises ProgrammingError on invalid keys or values.
        """
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                raise error.ProgrammingError(
                    b"key name '%s' is reserved" % self.firstlinekey
                )
            if not k[0:1].isalpha():
                raise error.ProgrammingError(
                    b"keys must start with a letter in a key-value file"
                )
            if not k.isalnum():
                raise error.ProgrammingError(
                    b"invalid key name in a simple key-value file"
                )
            if b'\n' in v:
                raise error.ProgrammingError(
                    b"invalid value in a simple key-value file"
                )
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
1929
1929
1930
1930
1931 _reportobsoletedsource = [
1931 _reportobsoletedsource = [
1932 b'debugobsolete',
1932 b'debugobsolete',
1933 b'pull',
1933 b'pull',
1934 b'push',
1934 b'push',
1935 b'serve',
1935 b'serve',
1936 b'unbundle',
1936 b'unbundle',
1937 ]
1937 ]
1938
1938
1939 _reportnewcssource = [
1939 _reportnewcssource = [
1940 b'pull',
1940 b'pull',
1941 b'unbundle',
1941 b'unbundle',
1942 ]
1942 ]
1943
1943
1944
1944
def prefetchfiles(repo, revmatches):
    """Invoke the registered file prefetch functions, allowing extensions
    to ensure the corresponding files are available locally before the
    command uses them.

    Args:
      revmatches: a list of (revision, match) tuples indicating the files
        to fetch at each revision. A None match element matches all files.
    """

    def _silentmatcher(m):
        if not m:
            return matchall(repo)
        assert isinstance(m, matchmod.basematcher)
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        return matchmod.badmatch(m, lambda fn, msg: None)

    revbadmatches = [(rev, _silentmatcher(m)) for (rev, m) in revmatches]

    fileprefetchhooks(repo, revbadmatches)
1968
1968
1969
1969
# hook point invoked with (repo, revs, match) to prefetch file contents
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1975
1975
1976
1976
1977 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1977 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1978 """register a callback to issue a summary after the transaction is closed
1978 """register a callback to issue a summary after the transaction is closed
1979
1979
1980 If as_validator is true, then the callbacks are registered as transaction
1980 If as_validator is true, then the callbacks are registered as transaction
1981 validators instead
1981 validators instead
1982 """
1982 """
1983
1983
1984 def txmatch(sources):
1984 def txmatch(sources):
1985 return any(txnname.startswith(source) for source in sources)
1985 return any(txnname.startswith(source) for source in sources)
1986
1986
1987 categories = []
1987 categories = []
1988
1988
1989 def reportsummary(func):
1989 def reportsummary(func):
1990 """decorator for report callbacks."""
1990 """decorator for report callbacks."""
1991 # The repoview life cycle is shorter than the one of the actual
1991 # The repoview life cycle is shorter than the one of the actual
1992 # underlying repository. So the filtered object can die before the
1992 # underlying repository. So the filtered object can die before the
1993 # weakref is used leading to troubles. We keep a reference to the
1993 # weakref is used leading to troubles. We keep a reference to the
1994 # unfiltered object and restore the filtering when retrieving the
1994 # unfiltered object and restore the filtering when retrieving the
1995 # repository through the weakref.
1995 # repository through the weakref.
1996 filtername = repo.filtername
1996 filtername = repo.filtername
1997 reporef = weakref.ref(repo.unfiltered())
1997 reporef = weakref.ref(repo.unfiltered())
1998
1998
1999 def wrapped(tr):
1999 def wrapped(tr):
2000 repo = reporef()
2000 repo = reporef()
2001 if filtername:
2001 if filtername:
2002 assert repo is not None # help pytype
2002 assert repo is not None # help pytype
2003 repo = repo.filtered(filtername)
2003 repo = repo.filtered(filtername)
2004 func(repo, tr)
2004 func(repo, tr)
2005
2005
2006 newcat = b'%02i-txnreport' % len(categories)
2006 newcat = b'%02i-txnreport' % len(categories)
2007 if as_validator:
2007 if as_validator:
2008 otr.addvalidator(newcat, wrapped)
2008 otr.addvalidator(newcat, wrapped)
2009 else:
2009 else:
2010 otr.addpostclose(newcat, wrapped)
2010 otr.addpostclose(newcat, wrapped)
2011 categories.append(newcat)
2011 categories.append(newcat)
2012 return wrapped
2012 return wrapped
2013
2013
2014 @reportsummary
2014 @reportsummary
2015 def reportchangegroup(repo, tr):
2015 def reportchangegroup(repo, tr):
2016 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2016 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2017 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2017 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2018 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2018 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2019 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2019 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2020 if cgchangesets or cgrevisions or cgfiles:
2020 if cgchangesets or cgrevisions or cgfiles:
2021 htext = b""
2021 htext = b""
2022 if cgheads:
2022 if cgheads:
2023 htext = _(b" (%+d heads)") % cgheads
2023 htext = _(b" (%+d heads)") % cgheads
2024 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2024 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2025 if as_validator:
2025 if as_validator:
2026 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2026 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2027 assert repo is not None # help pytype
2027 assert repo is not None # help pytype
2028 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2028 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2029
2029
2030 if txmatch(_reportobsoletedsource):
2030 if txmatch(_reportobsoletedsource):
2031
2031
2032 @reportsummary
2032 @reportsummary
2033 def reportobsoleted(repo, tr):
2033 def reportobsoleted(repo, tr):
2034 obsoleted = obsutil.getobsoleted(repo, tr)
2034 obsoleted = obsutil.getobsoleted(repo, tr)
2035 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2035 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2036 if newmarkers:
2036 if newmarkers:
2037 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2037 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2038 if obsoleted:
2038 if obsoleted:
2039 msg = _(b'obsoleted %i changesets\n')
2039 msg = _(b'obsoleted %i changesets\n')
2040 if as_validator:
2040 if as_validator:
2041 msg = _(b'obsoleting %i changesets\n')
2041 msg = _(b'obsoleting %i changesets\n')
2042 repo.ui.status(msg % len(obsoleted))
2042 repo.ui.status(msg % len(obsoleted))
2043
2043
2044 if obsolete.isenabled(
2044 if obsolete.isenabled(
2045 repo, obsolete.createmarkersopt
2045 repo, obsolete.createmarkersopt
2046 ) and repo.ui.configbool(
2046 ) and repo.ui.configbool(
2047 b'experimental', b'evolution.report-instabilities'
2047 b'experimental', b'evolution.report-instabilities'
2048 ):
2048 ):
2049 instabilitytypes = [
2049 instabilitytypes = [
2050 (b'orphan', b'orphan'),
2050 (b'orphan', b'orphan'),
2051 (b'phase-divergent', b'phasedivergent'),
2051 (b'phase-divergent', b'phasedivergent'),
2052 (b'content-divergent', b'contentdivergent'),
2052 (b'content-divergent', b'contentdivergent'),
2053 ]
2053 ]
2054
2054
2055 def getinstabilitycounts(repo):
2055 def getinstabilitycounts(repo):
2056 filtered = repo.changelog.filteredrevs
2056 filtered = repo.changelog.filteredrevs
2057 counts = {}
2057 counts = {}
2058 for instability, revset in instabilitytypes:
2058 for instability, revset in instabilitytypes:
2059 counts[instability] = len(
2059 counts[instability] = len(
2060 set(obsolete.getrevs(repo, revset)) - filtered
2060 set(obsolete.getrevs(repo, revset)) - filtered
2061 )
2061 )
2062 return counts
2062 return counts
2063
2063
2064 oldinstabilitycounts = getinstabilitycounts(repo)
2064 oldinstabilitycounts = getinstabilitycounts(repo)
2065
2065
2066 @reportsummary
2066 @reportsummary
2067 def reportnewinstabilities(repo, tr):
2067 def reportnewinstabilities(repo, tr):
2068 newinstabilitycounts = getinstabilitycounts(repo)
2068 newinstabilitycounts = getinstabilitycounts(repo)
2069 for instability, revset in instabilitytypes:
2069 for instability, revset in instabilitytypes:
2070 delta = (
2070 delta = (
2071 newinstabilitycounts[instability]
2071 newinstabilitycounts[instability]
2072 - oldinstabilitycounts[instability]
2072 - oldinstabilitycounts[instability]
2073 )
2073 )
2074 msg = getinstabilitymessage(delta, instability)
2074 msg = getinstabilitymessage(delta, instability)
2075 if msg:
2075 if msg:
2076 repo.ui.warn(msg)
2076 repo.ui.warn(msg)
2077
2077
2078 if txmatch(_reportnewcssource):
2078 if txmatch(_reportnewcssource):
2079
2079
2080 @reportsummary
2080 @reportsummary
2081 def reportnewcs(repo, tr):
2081 def reportnewcs(repo, tr):
2082 """Report the range of new revisions pulled/unbundled."""
2082 """Report the range of new revisions pulled/unbundled."""
2083 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2083 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2084 unfi = repo.unfiltered()
2084 unfi = repo.unfiltered()
2085 if origrepolen >= len(unfi):
2085 if origrepolen >= len(unfi):
2086 return
2086 return
2087
2087
2088 # Compute the bounds of new visible revisions' range.
2088 # Compute the bounds of new visible revisions' range.
2089 revs = smartset.spanset(repo, start=origrepolen)
2089 revs = smartset.spanset(repo, start=origrepolen)
2090 if revs:
2090 if revs:
2091 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2091 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2092
2092
2093 if minrev == maxrev:
2093 if minrev == maxrev:
2094 revrange = minrev
2094 revrange = minrev
2095 else:
2095 else:
2096 revrange = b'%s:%s' % (minrev, maxrev)
2096 revrange = b'%s:%s' % (minrev, maxrev)
2097 draft = len(repo.revs(b'%ld and draft()', revs))
2097 draft = len(repo.revs(b'%ld and draft()', revs))
2098 secret = len(repo.revs(b'%ld and secret()', revs))
2098 secret = len(repo.revs(b'%ld and secret()', revs))
2099 if not (draft or secret):
2099 if not (draft or secret):
2100 msg = _(b'new changesets %s\n') % revrange
2100 msg = _(b'new changesets %s\n') % revrange
2101 elif draft and secret:
2101 elif draft and secret:
2102 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2102 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2103 msg %= (revrange, draft, secret)
2103 msg %= (revrange, draft, secret)
2104 elif draft:
2104 elif draft:
2105 msg = _(b'new changesets %s (%d drafts)\n')
2105 msg = _(b'new changesets %s (%d drafts)\n')
2106 msg %= (revrange, draft)
2106 msg %= (revrange, draft)
2107 elif secret:
2107 elif secret:
2108 msg = _(b'new changesets %s (%d secrets)\n')
2108 msg = _(b'new changesets %s (%d secrets)\n')
2109 msg %= (revrange, secret)
2109 msg %= (revrange, secret)
2110 else:
2110 else:
2111 errormsg = b'entered unreachable condition'
2111 errormsg = b'entered unreachable condition'
2112 raise error.ProgrammingError(errormsg)
2112 raise error.ProgrammingError(errormsg)
2113 repo.ui.status(msg)
2113 repo.ui.status(msg)
2114
2114
2115 # search new changesets directly pulled as obsolete
2115 # search new changesets directly pulled as obsolete
2116 duplicates = tr.changes.get(b'revduplicates', ())
2116 duplicates = tr.changes.get(b'revduplicates', ())
2117 obsadded = unfi.revs(
2117 obsadded = unfi.revs(
2118 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2118 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2119 )
2119 )
2120 cl = repo.changelog
2120 cl = repo.changelog
2121 extinctadded = [r for r in obsadded if r not in cl]
2121 extinctadded = [r for r in obsadded if r not in cl]
2122 if extinctadded:
2122 if extinctadded:
2123 # They are not just obsolete, but obsolete and invisible
2123 # They are not just obsolete, but obsolete and invisible
2124 # we call them "extinct" internally but the terms have not been
2124 # we call them "extinct" internally but the terms have not been
2125 # exposed to users.
2125 # exposed to users.
2126 msg = b'(%d other changesets obsolete on arrival)\n'
2126 msg = b'(%d other changesets obsolete on arrival)\n'
2127 repo.ui.status(msg % len(extinctadded))
2127 repo.ui.status(msg % len(extinctadded))
2128
2128
2129 @reportsummary
2129 @reportsummary
2130 def reportphasechanges(repo, tr):
2130 def reportphasechanges(repo, tr):
2131 """Report statistics of phase changes for changesets pre-existing
2131 """Report statistics of phase changes for changesets pre-existing
2132 pull/unbundle.
2132 pull/unbundle.
2133 """
2133 """
2134 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2134 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2135 published = []
2135 published = []
2136 for revs, (old, new) in tr.changes.get(b'phases', []):
2136 for revs, (old, new) in tr.changes.get(b'phases', []):
2137 if new != phases.public:
2137 if new != phases.public:
2138 continue
2138 continue
2139 published.extend(rev for rev in revs if rev < origrepolen)
2139 published.extend(rev for rev in revs if rev < origrepolen)
2140 if not published:
2140 if not published:
2141 return
2141 return
2142 msg = _(b'%d local changesets published\n')
2142 msg = _(b'%d local changesets published\n')
2143 if as_validator:
2143 if as_validator:
2144 msg = _(b'%d local changesets will be published\n')
2144 msg = _(b'%d local changesets will be published\n')
2145 repo.ui.status(msg % len(published))
2145 repo.ui.status(msg % len(published))
2146
2146
2147
2147
2148 def getinstabilitymessage(delta, instability):
2148 def getinstabilitymessage(delta, instability):
2149 """function to return the message to show warning about new instabilities
2149 """function to return the message to show warning about new instabilities
2150
2150
2151 exists as a separate function so that extension can wrap to show more
2151 exists as a separate function so that extension can wrap to show more
2152 information like how to fix instabilities"""
2152 information like how to fix instabilities"""
2153 if delta > 0:
2153 if delta > 0:
2154 return _(b'%i new %s changesets\n') % (delta, instability)
2154 return _(b'%i new %s changesets\n') % (delta, instability)
2155
2155
2156
2156
2157 def nodesummaries(repo, nodes, maxnumnodes=4):
2157 def nodesummaries(repo, nodes, maxnumnodes=4):
2158 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2158 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2159 return b' '.join(short(h) for h in nodes)
2159 return b' '.join(short(h) for h in nodes)
2160 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2160 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2161 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2161 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2162
2162
2163
2163
2164 def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
2164 def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
2165 """check that no named branch has multiple heads"""
2165 """check that no named branch has multiple heads"""
2166 if desc in (b'strip', b'repair'):
2166 if desc in (b'strip', b'repair'):
2167 # skip the logic during strip
2167 # skip the logic during strip
2168 return
2168 return
2169 visible = repo.filtered(filtername)
2169 visible = repo.filtered(filtername)
2170 # possible improvement: we could restrict the check to affected branch
2170 # possible improvement: we could restrict the check to affected branch
2171 bm = visible.branchmap()
2171 bm = visible.branchmap()
2172 for name in bm:
2172 for name in bm:
2173 heads = bm.branchheads(name, closed=accountclosed)
2173 heads = bm.branchheads(name, closed=accountclosed)
2174 if len(heads) > 1:
2174 if len(heads) > 1:
2175 msg = _(b'rejecting multiple heads on branch "%s"')
2175 msg = _(b'rejecting multiple heads on branch "%s"')
2176 msg %= name
2176 msg %= name
2177 hint = _(b'%d heads: %s')
2177 hint = _(b'%d heads: %s')
2178 hint %= (len(heads), nodesummaries(repo, heads))
2178 hint %= (len(heads), nodesummaries(repo, heads))
2179 raise error.Abort(msg, hint=hint)
2179 raise error.Abort(msg, hint=hint)
2180
2180
2181
2181
2182 def wrapconvertsink(sink):
2182 def wrapconvertsink(sink):
2183 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2183 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2184 before it is used, whether or not the convert extension was formally loaded.
2184 before it is used, whether or not the convert extension was formally loaded.
2185 """
2185 """
2186 return sink
2186 return sink
2187
2187
2188
2188
2189 def unhidehashlikerevs(repo, specs, hiddentype):
2189 def unhidehashlikerevs(repo, specs, hiddentype):
2190 """parse the user specs and unhide changesets whose hash or revision number
2190 """parse the user specs and unhide changesets whose hash or revision number
2191 is passed.
2191 is passed.
2192
2192
2193 hiddentype can be: 1) 'warn': warn while unhiding changesets
2193 hiddentype can be: 1) 'warn': warn while unhiding changesets
2194 2) 'nowarn': don't warn while unhiding changesets
2194 2) 'nowarn': don't warn while unhiding changesets
2195
2195
2196 returns a repo object with the required changesets unhidden
2196 returns a repo object with the required changesets unhidden
2197 """
2197 """
2198 if not repo.filtername or not repo.ui.configbool(
2198 if not repo.filtername or not repo.ui.configbool(
2199 b'experimental', b'directaccess'
2199 b'experimental', b'directaccess'
2200 ):
2200 ):
2201 return repo
2201 return repo
2202
2202
2203 if repo.filtername not in (b'visible', b'visible-hidden'):
2203 if repo.filtername not in (b'visible', b'visible-hidden'):
2204 return repo
2204 return repo
2205
2205
2206 symbols = set()
2206 symbols = set()
2207 for spec in specs:
2207 for spec in specs:
2208 try:
2208 try:
2209 tree = revsetlang.parse(spec)
2209 tree = revsetlang.parse(spec)
2210 except error.ParseError: # will be reported by scmutil.revrange()
2210 except error.ParseError: # will be reported by scmutil.revrange()
2211 continue
2211 continue
2212
2212
2213 symbols.update(revsetlang.gethashlikesymbols(tree))
2213 symbols.update(revsetlang.gethashlikesymbols(tree))
2214
2214
2215 if not symbols:
2215 if not symbols:
2216 return repo
2216 return repo
2217
2217
2218 revs = _getrevsfromsymbols(repo, symbols)
2218 revs = _getrevsfromsymbols(repo, symbols)
2219
2219
2220 if not revs:
2220 if not revs:
2221 return repo
2221 return repo
2222
2222
2223 if hiddentype == b'warn':
2223 if hiddentype == b'warn':
2224 unfi = repo.unfiltered()
2224 unfi = repo.unfiltered()
2225 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2225 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2226 repo.ui.warn(
2226 repo.ui.warn(
2227 _(
2227 _(
2228 b"warning: accessing hidden changesets for write "
2228 b"warning: accessing hidden changesets for write "
2229 b"operation: %s\n"
2229 b"operation: %s\n"
2230 )
2230 )
2231 % revstr
2231 % revstr
2232 )
2232 )
2233
2233
2234 # we have to use new filtername to separate branch/tags cache until we can
2234 # we have to use new filtername to separate branch/tags cache until we can
2235 # disbale these cache when revisions are dynamically pinned.
2235 # disbale these cache when revisions are dynamically pinned.
2236 return repo.filtered(b'visible-hidden', revs)
2236 return repo.filtered(b'visible-hidden', revs)
2237
2237
2238
2238
2239 def _getrevsfromsymbols(repo, symbols):
2239 def _getrevsfromsymbols(repo, symbols):
2240 """parse the list of symbols and returns a set of revision numbers of hidden
2240 """parse the list of symbols and returns a set of revision numbers of hidden
2241 changesets present in symbols"""
2241 changesets present in symbols"""
2242 revs = set()
2242 revs = set()
2243 unfi = repo.unfiltered()
2243 unfi = repo.unfiltered()
2244 unficl = unfi.changelog
2244 unficl = unfi.changelog
2245 cl = repo.changelog
2245 cl = repo.changelog
2246 tiprev = len(unficl)
2246 tiprev = len(unficl)
2247 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2247 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2248 for s in symbols:
2248 for s in symbols:
2249 try:
2249 try:
2250 n = int(s)
2250 n = int(s)
2251 if n <= tiprev:
2251 if n <= tiprev:
2252 if not allowrevnums:
2252 if not allowrevnums:
2253 continue
2253 continue
2254 else:
2254 else:
2255 if n not in cl:
2255 if n not in cl:
2256 revs.add(n)
2256 revs.add(n)
2257 continue
2257 continue
2258 except ValueError:
2258 except ValueError:
2259 pass
2259 pass
2260
2260
2261 try:
2261 try:
2262 s = resolvehexnodeidprefix(unfi, s)
2262 s = resolvehexnodeidprefix(unfi, s)
2263 except (error.LookupError, error.WdirUnsupported):
2263 except (error.LookupError, error.WdirUnsupported):
2264 s = None
2264 s = None
2265
2265
2266 if s is not None:
2266 if s is not None:
2267 rev = unficl.rev(s)
2267 rev = unficl.rev(s)
2268 if rev not in cl:
2268 if rev not in cl:
2269 revs.add(rev)
2269 revs.add(rev)
2270
2270
2271 return revs
2271 return revs
2272
2272
2273
2273
2274 def bookmarkrevs(repo, mark):
2274 def bookmarkrevs(repo, mark):
2275 """Select revisions reachable by a given bookmark
2275 """Select revisions reachable by a given bookmark
2276
2276
2277 If the bookmarked revision isn't a head, an empty set will be returned.
2277 If the bookmarked revision isn't a head, an empty set will be returned.
2278 """
2278 """
2279 return repo.revs(format_bookmark_revspec(mark))
2279 return repo.revs(format_bookmark_revspec(mark))
2280
2280
2281
2281
2282 def format_bookmark_revspec(mark):
2282 def format_bookmark_revspec(mark):
2283 """Build a revset expression to select revisions reachable by a given
2283 """Build a revset expression to select revisions reachable by a given
2284 bookmark"""
2284 bookmark"""
2285 mark = b'literal:' + mark
2285 mark = b'literal:' + mark
2286 return revsetlang.formatspec(
2286 return revsetlang.formatspec(
2287 b"ancestors(bookmark(%s)) - "
2287 b"ancestors(bookmark(%s)) - "
2288 b"ancestors(head() and not bookmark(%s)) - "
2288 b"ancestors(head() and not bookmark(%s)) - "
2289 b"ancestors(bookmark() and not bookmark(%s))",
2289 b"ancestors(bookmark() and not bookmark(%s))",
2290 mark,
2290 mark,
2291 mark,
2291 mark,
2292 mark,
2292 mark,
2293 )
2293 )
@@ -1,69 +1,69
1 #require no-icasefs
1 #require no-icasefs
2
2
3 test file addition with colliding case
3 test file addition with colliding case
4
4
5 $ hg init repo1
5 $ hg init repo1
6 $ cd repo1
6 $ cd repo1
7 $ echo a > a
7 $ echo a > a
8 $ echo A > A
8 $ echo A > A
9 $ hg add a
9 $ hg add a
10 $ hg st
10 $ hg st
11 A a
11 A a
12 ? A
12 ? A
13 $ hg add --config ui.portablefilenames=abort A
13 $ hg add --config ui.portablefilenames=abort A
14 abort: possible case-folding collision for A
14 abort: possible case-folding collision for A
15 [255]
15 [20]
16 $ hg st
16 $ hg st
17 A a
17 A a
18 ? A
18 ? A
19 $ hg add A
19 $ hg add A
20 warning: possible case-folding collision for A
20 warning: possible case-folding collision for A
21 $ hg st
21 $ hg st
22 A A
22 A A
23 A a
23 A a
24 $ hg forget A
24 $ hg forget A
25 $ hg st
25 $ hg st
26 A a
26 A a
27 ? A
27 ? A
28 $ hg add --config ui.portablefilenames=no A
28 $ hg add --config ui.portablefilenames=no A
29 $ hg st
29 $ hg st
30 A A
30 A A
31 A a
31 A a
32 $ mkdir b
32 $ mkdir b
33 $ touch b/c b/D
33 $ touch b/c b/D
34 $ hg add b
34 $ hg add b
35 adding b/D
35 adding b/D
36 adding b/c
36 adding b/c
37 $ touch b/d b/C
37 $ touch b/d b/C
38 $ hg add b/C
38 $ hg add b/C
39 warning: possible case-folding collision for b/C
39 warning: possible case-folding collision for b/C
40 $ hg add b/d
40 $ hg add b/d
41 warning: possible case-folding collision for b/d
41 warning: possible case-folding collision for b/d
42 $ touch b/a1 b/a2
42 $ touch b/a1 b/a2
43 $ hg add b
43 $ hg add b
44 adding b/a1
44 adding b/a1
45 adding b/a2
45 adding b/a2
46 $ touch b/A2 b/a1.1
46 $ touch b/A2 b/a1.1
47 $ hg add b/a1.1 b/A2
47 $ hg add b/a1.1 b/A2
48 warning: possible case-folding collision for b/A2
48 warning: possible case-folding collision for b/A2
49 $ touch b/f b/F
49 $ touch b/f b/F
50 $ hg add b/f b/F
50 $ hg add b/f b/F
51 warning: possible case-folding collision for b/f
51 warning: possible case-folding collision for b/f
52 $ touch g G
52 $ touch g G
53 $ hg add g G
53 $ hg add g G
54 warning: possible case-folding collision for g
54 warning: possible case-folding collision for g
55 $ mkdir h H
55 $ mkdir h H
56 $ touch h/x H/x
56 $ touch h/x H/x
57 $ hg add h/x H/x
57 $ hg add h/x H/x
58 warning: possible case-folding collision for h/x
58 warning: possible case-folding collision for h/x
59 $ touch h/s H/s
59 $ touch h/s H/s
60 $ hg add h/s
60 $ hg add h/s
61 $ hg add H/s
61 $ hg add H/s
62 warning: possible case-folding collision for H/s
62 warning: possible case-folding collision for H/s
63
63
64 case changing rename must not warn or abort
64 case changing rename must not warn or abort
65
65
66 $ echo c > c
66 $ echo c > c
67 $ hg ci -qAmx
67 $ hg ci -qAmx
68 $ hg mv c C
68 $ hg mv c C
69 $ cd ..
69 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now