scmutil: try-delete `.hg/store/requires` if store requirements are empty...
Pulkit Goyal
r46531:aba4f2c9 default
@@ -1,2312 +1,2315 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    requirements as requirementsmod,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    '''Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    '''

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)

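# A minimal usage sketch (hypothetical caller; assumes an existing ``repo``):
# because status() is iterable in a fixed order, the seven lists can be
# unpacked directly or read by attribute.
#
#   st = repo.status()
#   modified, added, removed, deleted, unknown, ignored, clean = st
#   # equivalently: st.modified, st.added, ..., st.clean
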
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        detailed_exit_code = 100
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        detailed_exit_code = 240
        coarse_exit_code = 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        if isinstance(inst, (error.InputError, error.ParseError)):
            detailed_exit_code = 10
        elif isinstance(inst, error.StateError):
            detailed_exit_code = 20
        elif isinstance(inst, error.ConfigError):
            detailed_exit_code = 30
        elif isinstance(inst, error.SecurityError):
            detailed_exit_code = 150
        elif isinstance(inst, error.CanceledError):
            detailed_exit_code = 250
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass the exit code to the caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code

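# A minimal usage sketch (hypothetical entry point): dispatchers wrap their
# command body in callcatch() so supported exceptions become exit codes
# (detailed codes when ui.detailed-exit-code is set, coarse ones otherwise).
#
#   def _runcommand():
#       ...  # may raise error.Abort, error.LockHeld, OSError, etc.
#   ret = callcatch(ui, _runcommand)
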
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.InputError(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )

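# Illustrative calls (hypothetical): each of the rules above raises
# error.InputError.
#
#   checknewlabel(repo, b'tip', b'bookmark')    # reserved name
#   checknewlabel(repo, b'a:b', b'bookmark')    # ':' is forbidden
#   checknewlabel(repo, b'123', b'bookmark')    # integers are rejected
#   checknewlabel(repo, b' x ', b'bookmark')    # surrounding whitespace
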
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.InputError(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = cl._filteredrevs_hashcache.get(maxrev)
    if not key:
        revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            key = s.digest()
        cl._filteredrevs_hashcache[maxrev] = key
    return key

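# A sketch of the hashed payload (assuming, say, revs 2 and 5 are filtered
# and maxrev >= 5): the key is sha1(b'2;5;').digest(), so any change in the
# filtered set below maxrev yields a different cache key.
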
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))

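# For example (hypothetical node): at normal verbosity this returns
# something like b'5:1234567890ab' (12-hex short form); with --debug the
# full 40-hex node is used instead.
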
def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False

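# For example (hypothetical repo with 100 revisions): b'17' may be a revnum,
# b'017' and b'250' cannot be, and b'0' still counts as a possible revnum.
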
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]

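# A minimal usage sketch (hypothetical revspecs): resolve one revision,
# falling back to the default when nothing was given.
#
#   ctx = revsingle(repo, b'tip')   # context for tip
#   ctx = revsingle(repo, b'')      # context for b'.' (the default)
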
def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

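# A minimal usage sketch (hypothetical revsets): the specs are OR-ed
# together, and caller-supplied arguments should be expanded with
# revsetlang.formatspec() first.
#
#   spec = revsetlang.formatspec(b'ancestors(%s)', b'tip')
#   revs = revrange(repo, [spec, b'draft()'])  # union of both revsets
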
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2

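# The window doubles until the size limit and then stays there; e.g. the
# first yielded values are 8, 16, 32, 64, 128, 256, 512, 512, 512, ...
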
def walkchangerevs(repo, revs, makefilematcher, prepare):
    '''Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()

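# A minimal usage sketch (hypothetical callbacks): prepare() is invoked on
# every context in a window (forward order) before the contexts are yielded
# in the order of ``revs``.
#
#   def prepare(ctx, fns):
#       pass  # e.g. prefetch data for ctx
#
#   for ctx in walkchangerevs(repo, revs, lambda ctx: matchall(repo), prepare):
#       ui.write(b'%d\n' % ctx.rev())
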
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
855 """Return a function that produced paths for presenting to the user.
855 """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath

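# A sketch of the behavior (hypothetical layout): with relative paths in
# effect and the shell in subdirectory ``sub``, the repo-relative name
# b'sub/f.txt' is presented as b'f.txt'; otherwise it stays repo-relative,
# using '/' separators when ui.slash is set.
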
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

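# A minimal sketch (hypothetical files a.py and b.py on Windows): bare
# globs are expanded, while explicitly-kinded patterns pass through.
#
#   expandpats([b'*.py', b're:.*\.c$'])
#   # -> [b'a.py', b'b.py', b're:.*\.c$']
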
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)


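# Worked example (hypothetical path): with
#
#     [ui]
#     origbackuppath = .hg/origbackups
#
# in the configuration, backuppath(ui, repo, b'dir/file') returns an
# absolute path under .hg/origbackups/dir/ rather than the default
# b'dir/file.orig' beside the working copy file, creating the backup
# directory and clearing conflicting files first.

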
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order; that might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned
            # ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


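# Sketch of the accepted ``replacements`` shapes (nodes are hypothetical):
#
#     cleanupnodes(repo, [old1, old2], b'strip')          # nodes, no successors
#     cleanupnodes(repo, {old: [new]}, b'amend')          # 1:1 rewrite
#     cleanupnodes(repo, {(old1, old2): [new]}, b'fold')  # n:1 fold
#
# The normalization at the top of the function turns the first form into
# {(old1,): (), (old2,): ()} and the second into {(old,): [new]}, so the
# rest of the code only ever sees tuple keys.

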
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


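# Summary of the classification above (dirstate state x on-disk presence):
#
#     '?' (untracked), path passes audit  -> unknown
#     tracked (not 'r'), file missing     -> deleted
#     'r' (removed), file exists          -> forgotten
#     'r' (removed), file missing         -> removed (rename source)
#     'a' (added)                         -> added

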
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


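# Example: ``hg addremove --similarity 90`` reaches this helper with
# similarity=0.9, so a removed file at least 90% similar to an added one
# is recorded as its rename, printing e.g.
#
#     recording removal of old.py as rename to new.py (95% similar)

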
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


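# Usage sketch (hypothetical names): build the lookup once, then query it
# per (file, rev) pair while walking a revision range.
#
#     getrenamed = getrenamedfn(repo, endrev=someendrev)
#     src = getrenamed(b'new-name.py', rev)  # b'old-name.py' or None

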
def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def filterrequirements(requirements):
    """ filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: requirements which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None


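# A minimal sketch of the split, with hypothetical requirement names; only
# the presence of requirementsmod.SHARESAFE_REQUIREMENT enables it:
#
#     reqs = {sharesafe, b'store', b'revlogv1', b'dotencode'}
#     wc, store = filterrequirements(reqs)
#     # wc:    members of requirementsmod.WORKING_DIR_REQUIREMENTS
#     # store: everything else, destined for .hg/store/requires
#
# Without the share-safe requirement, the result is (requirements, None)
# and everything stays in .hg/requires.

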
def istreemanifest(repo):
    """ returns whether the repository is using treemanifest or not """
    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements


def writereporequirements(repo, requirements=None):
    """ writes requirements for the repo to .hg/requires """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')


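# Note on the ``elif`` above: when filterrequirements() returns
# storereq=None (the non-share-safe case), a stale .hg/store/requires left
# behind by an earlier configuration is removed, but only for repositories
# that have a store at all; tryunlink() does not complain if the file is
# already absent.

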
def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x


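# Usage sketch (hypothetical subclass and loader): localrepo defines
# subclasses along these lines, so the decorated property is recomputed
# only when the backing file under .hg/ changes.
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class somerepo(object):
#         def __init__(self):
#             self._filecache = {}
#
#         @repofilecache(b'bookmarks')
#         def _bookmarks(self):
#             return expensiveload(self)  # hypothetical loader

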
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data


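# Example configuration (hypothetical file name and command):
#
#     [extdata]
#     filedata = file:extdata.txt
#     shelldata = shell:cat extdata.txt | grep 2
#
# where each line of the source is '<revspec>[ <value>]', for instance:
#
#     2 another comment on 2
#     3
#
# Records whose revision is unknown locally are silently skipped.

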
class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))


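# Usage sketch: ui.makeprogress() is the usual way to obtain one of these;
# the stub callback below stands in for the real progress-bar updater.
#
#     def updatebar(topic, pos, item, unit, total):
#         pass  # a real implementation draws or clears the bar
#
#     with progress(ui, updatebar, b'changesets', unit=b'chunks',
#                   total=100) as prog:
#         for i in range(100):
#             prog.increment()
#     # leaving the block calls complete(), clearing the bar

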
def gdinitconfig(ui):
    """helper function to know whether a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know whether incoming deltas should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


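# Both helpers read standard [format] configuration; a typical hgrc snippet
# (illustrative) that makes gdinitconfig() return True:
#
#     [format]
#     generaldelta = yes

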
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumeric and start with a letter; values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of the file
        should be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines, which only contain '\n' and therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumeric and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as-is, not in key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))


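# A usage sketch (illustrative; 'repo' and the b'mystate' path are
# hypothetical): round-tripping a mapping through a simplekeyvaluefile.
#
#     skvf = simplekeyvaluefile(repo.vfs, b'mystate')
#     skvf.write({b'version': b'1'}, firstline=b'header')
#     skvf.read(firstlinenonkeyval=True)
#     # -> {b'__firstline': b'header', b'version': b'1'}

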
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
        fetch at each revision. If any of the match elements is None, it
        matches all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

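# Sketch of how an extension can participate (illustrative; '_prefetch' is a
# hypothetical callback). Each registered hook receives the repo and the
# normalized (rev, match) pairs built by prefetchfiles() above:
#
#     def _prefetch(repo, revmatches):
#         for rev, match in revmatches:
#             pass  # fetch the matched files for 'rev' from remote storage
#
#     fileprefetchhooks.add(b'myext', _prefetch)
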
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead.
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of the new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search for new changesets pulled directly as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally, but the term has not
                # been exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))


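# Sketch of a typical call site (illustrative): transaction-opening code wires
# the summary up before the transaction is closed, e.g.
#
#     tr = repo.transaction(b'pull')
#     registersummarycallback(repo, tr, txnname=b'pull')
#
# so the 'added/adding ...' and 'new changesets ...' messages above are
# emitted when the transaction closes (or validates, with as_validator=True).

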
def getinstabilitymessage(delta, instability):
    """return the warning message to show about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to the affected branches
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate the branch/tags caches until
    # we can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)


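# Sketch (illustrative; the hash below is hypothetical): command code calls
# this before resolving user-supplied revision specs, e.g.
#
#     repo = unhidehashlikerevs(repo, [b'ab12cd34ef56'], b'warn')
#
# so a hash of a hidden changeset still resolves, with the warning above.

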
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
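

# Usage sketch (illustrative; b'feature-x' is a hypothetical bookmark):
#
#     revs = bookmarkrevs(repo, b'feature-x')
#
# selects the changesets "owned" by the bookmark: its ancestors, minus the
# ancestors of other heads and of other bookmarks.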