status: fix default value of status struct...
Yuya Nishihara
r44164:7d237fd3 default draft
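The fix below replaces attr.ib(default=list) with attr.ib(default=attr.Factory(list)) for every field of the status struct. With the attrs package, a bare callable passed as default is stored as the default value itself rather than being called, so status().modified evaluated to the list type object instead of an empty list. Wrapping the callable in attr.Factory makes attrs call list() once per new instance. A minimal sketch of the difference (the oldstatus/newstatus class names are hypothetical, for illustration only):

    import attr

    @attr.s(slots=True)
    class oldstatus(object):
        # Buggy: the default *value* is the list type object itself.
        modified = attr.ib(default=list)

    @attr.s(slots=True)
    class newstatus(object):
        # Fixed: attrs calls list() once per instance.
        modified = attr.ib(default=attr.Factory(list))

    assert oldstatus().modified is list  # the class, not an empty list
    assert newstatus().modified == []  # a fresh empty list
    assert newstatus().modified is not newstatus().modified  # not shared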
mercurial/scmutil.py
@@ -1,2197 +1,2197 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import hashlib
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    '''Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    '''

-    modified = attr.ib(default=list)
-    added = attr.ib(default=list)
-    removed = attr.ib(default=list)
-    deleted = attr.ib(default=list)
-    unknown = attr.ib(default=list)
-    ignored = attr.ib(default=list)
-    clean = attr.ib(default=list)
+    modified = attr.ib(default=attr.Factory(list))
+    added = attr.ib(default=attr.Factory(list))
+    removed = attr.ib(default=attr.Factory(list))
+    deleted = attr.ib(default=attr.Factory(list))
+    unknown = attr.ib(default=attr.Factory(list))
+    ignored = attr.ib(default=attr.Factory(list))
+    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)


def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, b"code"):  # HTTPError
            ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, b"reason"):  # URLError or SSLError
            try:  # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_(b"abort: error: %s\n") % reason)
        elif (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code

    return -1


def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.Abort(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.Abort(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.Abort(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)


def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.Abort(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update(b'%d;' % rev)
        key = s.digest()
    return key


def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x') and repo.ui.configbool(
        b'experimental', b'revisions.prefixhexnode'
    ):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False


def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)


def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
777 """Return a function that produced paths for presenting to the user.
777 """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    elif repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
830 if not util.expandglobs:
830 if not util.expandglobs:
831 return list(pats)
831 return list(pats)
832 ret = []
832 ret = []
833 for kindpat in pats:
833 for kindpat in pats:
834 kind, pat = matchmod._patsplit(kindpat, None)
834 kind, pat = matchmod._patsplit(kindpat, None)
835 if kind is None:
835 if kind is None:
836 try:
836 try:
837 globbed = glob.glob(pat)
837 globbed = glob.glob(pat)
838 except re.error:
838 except re.error:
839 globbed = [pat]
839 globbed = [pat]
840 if globbed:
840 if globbed:
841 ret.extend(globbed)
841 ret.extend(globbed)
842 continue
842 continue
843 ret.append(kindpat)
843 ret.append(kindpat)
844 return ret
844 return ret
845
845
846
846
847 def matchandpats(
847 def matchandpats(
848 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
848 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
849 ):
849 ):
850 '''Return a matcher and the patterns that were used.
850 '''Return a matcher and the patterns that were used.
851 The matcher will warn about bad matches, unless an alternate badfn callback
851 The matcher will warn about bad matches, unless an alternate badfn callback
852 is provided.'''
852 is provided.'''
853 if opts is None:
853 if opts is None:
854 opts = {}
854 opts = {}
855 if not globbed and default == b'relpath':
855 if not globbed and default == b'relpath':
856 pats = expandpats(pats or [])
856 pats = expandpats(pats or [])
857
857
858 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
858 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
859
859
860 def bad(f, msg):
860 def bad(f, msg):
861 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
861 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
862
862
863 if badfn is None:
863 if badfn is None:
864 badfn = bad
864 badfn = bad
865
865
866 m = ctx.match(
866 m = ctx.match(
867 pats,
867 pats,
868 opts.get(b'include'),
868 opts.get(b'include'),
869 opts.get(b'exclude'),
869 opts.get(b'exclude'),
870 default,
870 default,
871 listsubrepos=opts.get(b'subrepos'),
871 listsubrepos=opts.get(b'subrepos'),
872 badfn=badfn,
872 badfn=badfn,
873 )
873 )
874
874
875 if m.always():
875 if m.always():
876 pats = []
876 pats = []
877 return m, pats
877 return m, pats
878
878
879
879
880 def match(
880 def match(
881 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
881 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
882 ):
882 ):
883 '''Return a matcher that will warn about bad matches.'''
883 '''Return a matcher that will warn about bad matches.'''
884 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
884 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
885
885
886
886
887 def matchall(repo):
887 def matchall(repo):
888 '''Return a matcher that will efficiently match everything.'''
888 '''Return a matcher that will efficiently match everything.'''
889 return matchmod.always()
889 return matchmod.always()
890
890
891
891
892 def matchfiles(repo, files, badfn=None):
892 def matchfiles(repo, files, badfn=None):
893 '''Return a matcher that will efficiently match exactly these files.'''
893 '''Return a matcher that will efficiently match exactly these files.'''
894 return matchmod.exact(files, badfn=badfn)
894 return matchmod.exact(files, badfn=badfn)
895
895
896
896
897 def parsefollowlinespattern(repo, rev, pat, msg):
897 def parsefollowlinespattern(repo, rev, pat, msg):
898 """Return a file name from `pat` pattern suitable for usage in followlines
898 """Return a file name from `pat` pattern suitable for usage in followlines
899 logic.
899 logic.
900 """
900 """
901 if not matchmod.patkind(pat):
901 if not matchmod.patkind(pat):
902 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
902 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
903 else:
903 else:
904 ctx = repo[rev]
904 ctx = repo[rev]
905 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
905 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
906 files = [f for f in ctx if m(f)]
906 files = [f for f in ctx if m(f)]
907 if len(files) != 1:
907 if len(files) != 1:
908 raise error.ParseError(msg)
908 raise error.ParseError(msg)
909 return files[0]
909 return files[0]
910
910
911
911
912 def getorigvfs(ui, repo):
912 def getorigvfs(ui, repo):
913 """return a vfs suitable to save 'orig' file
913 """return a vfs suitable to save 'orig' file
914
914
915 return None if no special directory is configured"""
915 return None if no special directory is configured"""
916 origbackuppath = ui.config(b'ui', b'origbackuppath')
916 origbackuppath = ui.config(b'ui', b'origbackuppath')
917 if not origbackuppath:
917 if not origbackuppath:
918 return None
918 return None
919 return vfs.vfs(repo.wvfs.join(origbackuppath))
919 return vfs.vfs(repo.wvfs.join(origbackuppath))
920
920
921
921
922 def backuppath(ui, repo, filepath):
922 def backuppath(ui, repo, filepath):
923 '''customize where working copy backup files (.orig files) are created
923 '''customize where working copy backup files (.orig files) are created
924
924
925 Fetch user defined path from config file: [ui] origbackuppath = <path>
925 Fetch user defined path from config file: [ui] origbackuppath = <path>
926 Fall back to default (filepath with .orig suffix) if not specified
926 Fall back to default (filepath with .orig suffix) if not specified
927
927
928 filepath is repo-relative
928 filepath is repo-relative
929
929
930 Returns an absolute path
930 Returns an absolute path
931 '''
931 '''
932 origvfs = getorigvfs(ui, repo)
932 origvfs = getorigvfs(ui, repo)
933 if origvfs is None:
933 if origvfs is None:
934 return repo.wjoin(filepath + b".orig")
934 return repo.wjoin(filepath + b".orig")
935
935
936 origbackupdir = origvfs.dirname(filepath)
936 origbackupdir = origvfs.dirname(filepath)
937 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
937 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
938 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
938 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
939
939
940 # Remove any files that conflict with the backup file's path
940 # Remove any files that conflict with the backup file's path
941 for f in reversed(list(pathutil.finddirs(filepath))):
941 for f in reversed(list(pathutil.finddirs(filepath))):
942 if origvfs.isfileorlink(f):
942 if origvfs.isfileorlink(f):
943 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
943 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
944 origvfs.unlink(f)
944 origvfs.unlink(f)
945 break
945 break
946
946
947 origvfs.makedirs(origbackupdir)
947 origvfs.makedirs(origbackupdir)
948
948
949 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
949 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
950 ui.note(
950 ui.note(
951 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
951 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
952 )
952 )
953 origvfs.rmtree(filepath, forcibly=True)
953 origvfs.rmtree(filepath, forcibly=True)
954
954
955 return origvfs.join(filepath)
955 return origvfs.join(filepath)
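

# Illustrative sketch (not part of the module): with a configuration such as
#
#     [ui]
#     origbackuppath = .hg/origbackups
#
# backuppath(ui, repo, b'dir/file') would return
# <repo>/.hg/origbackups/dir/file rather than the default
# <repo>/dir/file.orig, after clearing any conflicting files or
# directories in the backup location.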


class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))
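

# Illustrative sketch: wrap a revset result so node-keyed callers can do
# cheap membership tests, e.g.
#
#     draftnodes = _containsnode(repo, repo.revs(b'draft()'))
#     if ctx.node() in draftnodes:
#         ...
#
# cleanupnodes() below uses exactly this to feed bookmarks.divergent2delete.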


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move the working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non-tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with the biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order; that might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned
            # ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
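

# Illustrative sketch (hypothetical invocation): a whole-working-copy
# addremove pass with rename detection could look like
#
#     uipathfn = getuipathfn(repo, legacyrelativevalue=True)
#     addremove(repo, matchmod.always(), b'', uipathfn,
#               {b'similarity': 90, b'dry_run': True})
#
# where unknown files at least 90% similar to a deleted one are reported
# as renames.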


def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
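

# For reference, the dirstate codes tested above are b'?' (untracked),
# b'a' (marked added), b'r' (marked removed), with b'n' (normal) and b'm'
# (merged) falling through; 'st' is the walk's stat result, so "not st"
# means the file is missing from disk.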


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fall back to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
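

# Illustrative sketch: log-style consumers call the returned closure once
# per file and revision, e.g.
#
#     getrenamed = getrenamedfn(repo)
#     src = getrenamed(b'copied/name.py', ctx.rev())  # source path or None
#
# getcopiesfn() below uses exactly this pattern in its filelog-based branch.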


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    various reasons, the operation might not end with dst being marked as
    copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    ds.setparents(newctx.node(), nullid)
    copies = dict(ds.copies())
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = dict(
        (dst, oldcopies.get(src, src))
        for dst, src in pycompat.iteritems(oldcopies)
    )
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)
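

# Illustrative sketch: writerequires(repo.vfs, {b'revlogv1', b'store'})
# (assuming the opener is the repository's .hg vfs) atomically rewrites
# the 'requires' file with one sorted requirement per line.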


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose method was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # a function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
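

# Illustrative subclass sketch (hypothetical, similar in spirit to the
# decorator localrepo uses): resolve cached paths through the object's vfs
# and decorate a reader method:
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class thing(object):
#         def __init__(self, vfs):
#             self.vfs = vfs
#             self._filecache = {}
#
#         @repofilecache(b'bookmarks')
#         def bookmarks(self):
#             return readbookmarks(self)  # hypothetical reader
#
# The first attribute access stats the file and computes the value; later
# accesses hit the instance dictionary directly until the attribute is
# deleted (no stat is performed, per the docstring above).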


def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
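

# Illustrative sketch (hypothetical source name and command): given
#
#     [extdata]
#     buginfo = shell:cat .hg/bugmap
#
# where each output line is "<revspec> <freeform value>", calling
# extdatasource(repo, b'buginfo') returns a {rev: value} dict covering the
# revisions that exist locally, silently skipping unknown ones.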


def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            b'lock can only be inherited while held'
        )
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)


def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(
        repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
    )
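

# Illustrative sketch (hypothetical command): while holding the wlock, a
# caller can delegate to a child hg process without deadlocking on it:
#
#     with repo.wlock():
#         rc = wlocksub(repo, b'hg debuglocks')
#
# The child sees HG_WLOCK_LOCKER in its environment and treats the lock as
# inherited rather than trying to acquire it again.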


class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''  # ensure 'unit' is defined even when self.unit is empty
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
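

# Illustrative usage sketch: callers normally obtain an instance through
# ui.makeprogress(), which supplies the 'updatebar' callback, and rely on
# the context manager to call complete():
#
#     with ui.makeprogress(b'scanning', unit=b'files', total=total) as prog:
#         for f in files:
#             prog.increment(item=f)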


def gdinitconfig(ui):
    """helper function to know if a repo should be created with general delta
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
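

# Illustrative round-trip sketch (hypothetical file name):
#
#     skv = simplekeyvaluefile(repo.vfs, b'examplestate')
#     skv.write({b'step': b'2'}, firstline=b'1')  # '1' as a version header
#     data = skv.read(firstlinenonkeyval=True)
#     # data == {b'__firstline': b'1', b'step': b'2'}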


_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]
1875
1875
1876
1876
1877 def prefetchfiles(repo, revs, match):
1877 def prefetchfiles(repo, revs, match):
1878 """Invokes the registered file prefetch functions, allowing extensions to
1878 """Invokes the registered file prefetch functions, allowing extensions to
1879 ensure the corresponding files are available locally, before the command
1879 ensure the corresponding files are available locally, before the command
1880 uses them."""
1880 uses them."""
1881 if match:
1881 if match:
1882 # The command itself will complain about files that don't exist, so
1882 # The command itself will complain about files that don't exist, so
1883 # don't duplicate the message.
1883 # don't duplicate the message.
1884 match = matchmod.badmatch(match, lambda fn, msg: None)
1884 match = matchmod.badmatch(match, lambda fn, msg: None)
1885 else:
1885 else:
1886 match = matchall(repo)
1886 match = matchall(repo)
1887
1887
1888 fileprefetchhooks(repo, revs, match)
1888 fileprefetchhooks(repo, revs, match)
1889
1889
1890
1890
1891 # a list of (repo, revs, match) prefetch functions
1891 # a list of (repo, revs, match) prefetch functions
1892 fileprefetchhooks = util.hooks()
1892 fileprefetchhooks = util.hooks()
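

# Illustrative sketch, not part of the original module: what a prefetch hook
# registered by a (hypothetical) remote-storage extension could look like.
# util.hooks.add() is the registration entry point assumed here.
def _exampleprefetchhook(repo, revs, match):
    # touching each matched file's data gives the extension a chance to
    # fetch it from remote storage before the command needs it
    for rev in revs:
        ctx = repo[rev]
        for fname in ctx.walk(match):
            ctx[fname].data()


# registration would happen at extension setup time, e.g.:
#   fileprefetchhooks.add(b'example-extension', _exampleprefetchhook)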

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


def registersummarycallback(repo, otr, txnname=b''):
    """register a callback to issue a summary after the transaction is closed
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                repo.ui.status(_(b'obsoleted %i changesets\n') % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of the new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search for new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally, but the term has not
                # been exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets that
            pre-existed the pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            phasetracking = tr.changes.get(b'phases', {})
            if not phasetracking:
                return
            published = [
                rev
                for rev, (old, new) in pycompat.iteritems(phasetracking)
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(
                _(b'%d local changesets published\n') % len(published)
            )
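

# Illustrative sketch, not part of the original module: an extension-style
# report hooked into the transaction with the same weakref + addpostclose
# pattern used by reportsummary() above; the category name and message are
# made up.
def _exampleregistercustomreport(repo, otr):
    reporef = weakref.ref(repo.unfiltered())

    def report(tr):
        # the weakref may be dead by the time the transaction closes
        r = reporef()
        if r is not None:
            r.ui.status(b'example: transaction closed\n')

    otr.addpostclose(b'99-txnreport-example', report)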


def getinstabilitymessage(delta, instability):
    """Return the message to show as a warning about new instabilities.

    Exists as a separate function so that extensions can wrap it to show
    more information, like how to fix the instabilities."""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)
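

# Illustrative sketch, not part of the original module: the expected outputs
# of getinstabilitymessage(), assuming the identity translation (C locale).
def _exampleinstabilitymessages():
    assert getinstabilitymessage(0, b'orphan') is None  # nothing to report
    # a positive delta yields the warning text, here
    # b'2 new orphan changesets\n'
    return getinstabilitymessage(2, b'orphan')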


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)
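

# Illustrative sketch, not part of the original module: a transaction
# validator in the spirit of the single-head-per-branch feature; the
# b'commit' transaction name is made up.
def _examplesingleheadvalidator(repo, tr):
    # raises error.Abort if any named branch ends up with multiple heads
    enforcesinglehead(repo, tr, desc=b'commit', accountclosed=False)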


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink
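

# Illustrative sketch, not part of the original module: an extension would
# typically wrap this hook via extensions.wrapfunction(); the proxy names
# below are made up.
#
#   def _wrappedconvertsink(orig, sink):
#       return _myloggingsinkproxy(orig(sink))
#
#   extensions.wrapfunction(scmutil, 'wrapconvertsink', _wrappedconvertsink)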


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate the branch/tags caches
    # until we can disable these caches when revisions are dynamically
    # pinned.
    return repo.filtered(b'visible-hidden', revs)
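

# Illustrative sketch, not part of the original module: a write command
# letting the user address hidden changesets directly; the spec value is
# made up.
def _exampledirectaccess(repo, specs=None):
    if specs is None:
        specs = [b'ffffffffffff']  # hypothetical hidden node prefix
    # returns a 'visible-hidden' filtered repo with those revisions pinned
    return unhidehashlikerevs(repo, specs, hiddentype=b'warn')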


def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                        continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
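

# For example, with b'experimental.directaccess.revnums' enabled, a revision
# number such as b'42' or a hidden node hex prefix would resolve here to the
# corresponding hidden revision numbers (values below are made up):
#
#   _getrevsfromsymbols(repo, {b'42', b'c3f1ca2924c2'})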


def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
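

# Illustrative sketch, not part of the original module: selecting the
# history that only a (hypothetical) bookmark b'feature' reaches, e.g. to
# feed an 'hg log' style query.
def _examplebookmarkrevs(repo):
    return bookmarkrevs(repo, b'feature')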