scmutil: display the optional hint when handling StorageError in catchall()...
Matt Harbison
r40694:4ec8bee1 default
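
The change is small: the error.StorageError branch of callcatch() previously printed only the abort message, while sibling handlers such as RepoError, InterventionRequired, and Abort also echoed the exception's optional hint. The two added lines (marked '+' in the diff below) bring StorageError in line with them. For context, here is a minimal standalone sketch of the resulting behavior; "ui" is replaced by print(), and StorageError, catchall(), and the hint text are simplified stand-ins for Mercurial's real objects, not the actual implementation:

# Illustration only: simplified stand-ins for Mercurial's error.StorageError
# and scmutil.callcatch(), showing the effect of the two added lines
# (echoing the optional hint, in parentheses, after the abort message).
class StorageError(Exception):
    def __init__(self, message, hint=None):
        super(StorageError, self).__init__(message)
        self.hint = hint

def catchall(func):
    try:
        return func()
    except StorageError as inst:
        print("abort: %s!" % inst)
        if inst.hint:  # the behavior this commit adds
            print("(%s)" % inst.hint)
        return -1

def demo():
    # hypothetical message and hint, for illustration
    raise StorageError("data/foo.i: corrupt revlog", hint="run 'hg verify'")

catchall(demo)
# Output:
#   abort: data/foo.i: corrupt revlog!
#   (run 'hg verify')
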
@@ -1,1800 +1,1802
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 nullid,
23 nullid,
24 nullrev,
24 nullrev,
25 short,
25 short,
26 wdirid,
26 wdirid,
27 wdirrev,
27 wdirrev,
28 )
28 )
29
29
30 from . import (
30 from . import (
31 encoding,
31 encoding,
32 error,
32 error,
33 match as matchmod,
33 match as matchmod,
34 obsolete,
34 obsolete,
35 obsutil,
35 obsutil,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 policy,
38 policy,
39 pycompat,
39 pycompat,
40 revsetlang,
40 revsetlang,
41 similar,
41 similar,
42 smartset,
42 smartset,
43 url,
43 url,
44 util,
44 util,
45 vfs,
45 vfs,
46 )
46 )
47
47
48 from .utils import (
48 from .utils import (
49 procutil,
49 procutil,
50 stringutil,
50 stringutil,
51 )
51 )
52
52
53 if pycompat.iswindows:
53 if pycompat.iswindows:
54 from . import scmwindows as scmplatform
54 from . import scmwindows as scmplatform
55 else:
55 else:
56 from . import scmposix as scmplatform
56 from . import scmposix as scmplatform
57
57
58 parsers = policy.importmod(r'parsers')
58 parsers = policy.importmod(r'parsers')
59
59
60 termsize = scmplatform.termsize
60 termsize = scmplatform.termsize
61
61
62 class status(tuple):
62 class status(tuple):
63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 and 'ignored' properties are only relevant to the working copy.
64 and 'ignored' properties are only relevant to the working copy.
65 '''
65 '''
66
66
67 __slots__ = ()
67 __slots__ = ()
68
68
69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 clean):
70 clean):
71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 ignored, clean))
72 ignored, clean))
73
73
74 @property
74 @property
75 def modified(self):
75 def modified(self):
76 '''files that have been modified'''
76 '''files that have been modified'''
77 return self[0]
77 return self[0]
78
78
79 @property
79 @property
80 def added(self):
80 def added(self):
81 '''files that have been added'''
81 '''files that have been added'''
82 return self[1]
82 return self[1]
83
83
84 @property
84 @property
85 def removed(self):
85 def removed(self):
86 '''files that have been removed'''
86 '''files that have been removed'''
87 return self[2]
87 return self[2]
88
88
89 @property
89 @property
90 def deleted(self):
90 def deleted(self):
91 '''files that are in the dirstate, but have been deleted from the
91 '''files that are in the dirstate, but have been deleted from the
92 working copy (aka "missing")
92 working copy (aka "missing")
93 '''
93 '''
94 return self[3]
94 return self[3]
95
95
96 @property
96 @property
97 def unknown(self):
97 def unknown(self):
98 '''files not in the dirstate that are not ignored'''
98 '''files not in the dirstate that are not ignored'''
99 return self[4]
99 return self[4]
100
100
101 @property
101 @property
102 def ignored(self):
102 def ignored(self):
103 '''files not in the dirstate that are ignored (by _dirignore())'''
103 '''files not in the dirstate that are ignored (by _dirignore())'''
104 return self[5]
104 return self[5]
105
105
106 @property
106 @property
107 def clean(self):
107 def clean(self):
108 '''files that have not been modified'''
108 '''files that have not been modified'''
109 return self[6]
109 return self[6]
110
110
111 def __repr__(self, *args, **kwargs):
111 def __repr__(self, *args, **kwargs):
112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 r'unknown=%s, ignored=%s, clean=%s>') %
113 r'unknown=%s, ignored=%s, clean=%s>') %
114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115
115
116 def itersubrepos(ctx1, ctx2):
116 def itersubrepos(ctx1, ctx2):
117 """find subrepos in ctx1 or ctx2"""
117 """find subrepos in ctx1 or ctx2"""
118 # Create a (subpath, ctx) mapping where we prefer subpaths from
118 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 # has been modified (in ctx2) but not yet committed (in ctx1).
120 # has been modified (in ctx2) but not yet committed (in ctx1).
121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123
123
124 missing = set()
124 missing = set()
125
125
126 for subpath in ctx2.substate:
126 for subpath in ctx2.substate:
127 if subpath not in ctx1.substate:
127 if subpath not in ctx1.substate:
128 del subpaths[subpath]
128 del subpaths[subpath]
129 missing.add(subpath)
129 missing.add(subpath)
130
130
131 for subpath, ctx in sorted(subpaths.iteritems()):
131 for subpath, ctx in sorted(subpaths.iteritems()):
132 yield subpath, ctx.sub(subpath)
132 yield subpath, ctx.sub(subpath)
133
133
134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 # status and diff will have an accurate result when it does
135 # status and diff will have an accurate result when it does
136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 # against itself.
137 # against itself.
138 for subpath in missing:
138 for subpath in missing:
139 yield subpath, ctx2.nullsub(subpath, ctx1)
139 yield subpath, ctx2.nullsub(subpath, ctx1)
140
140
141 def nochangesfound(ui, repo, excluded=None):
141 def nochangesfound(ui, repo, excluded=None):
142 '''Report no changes for push/pull, excluded is None or a list of
142 '''Report no changes for push/pull, excluded is None or a list of
143 nodes excluded from the push/pull.
143 nodes excluded from the push/pull.
144 '''
144 '''
145 secretlist = []
145 secretlist = []
146 if excluded:
146 if excluded:
147 for n in excluded:
147 for n in excluded:
148 ctx = repo[n]
148 ctx = repo[n]
149 if ctx.phase() >= phases.secret and not ctx.extinct():
149 if ctx.phase() >= phases.secret and not ctx.extinct():
150 secretlist.append(n)
150 secretlist.append(n)
151
151
152 if secretlist:
152 if secretlist:
153 ui.status(_("no changes found (ignored %d secret changesets)\n")
153 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 % len(secretlist))
154 % len(secretlist))
155 else:
155 else:
156 ui.status(_("no changes found\n"))
156 ui.status(_("no changes found\n"))
157
157
158 def callcatch(ui, func):
158 def callcatch(ui, func):
159 """call func() with global exception handling
159 """call func() with global exception handling
160
160
161 return func() if no exception happens. otherwise do some error handling
161 return func() if no exception happens. otherwise do some error handling
162 and return an exit code accordingly. does not handle all exceptions.
162 and return an exit code accordingly. does not handle all exceptions.
163 """
163 """
164 try:
164 try:
165 try:
165 try:
166 return func()
166 return func()
167 except: # re-raises
167 except: # re-raises
168 ui.traceback()
168 ui.traceback()
169 raise
169 raise
170 # Global exception handling, alphabetically
170 # Global exception handling, alphabetically
171 # Mercurial-specific first, followed by built-in and library exceptions
171 # Mercurial-specific first, followed by built-in and library exceptions
172 except error.LockHeld as inst:
172 except error.LockHeld as inst:
173 if inst.errno == errno.ETIMEDOUT:
173 if inst.errno == errno.ETIMEDOUT:
174 reason = _('timed out waiting for lock held by %r') % (
174 reason = _('timed out waiting for lock held by %r') % (
175 pycompat.bytestr(inst.locker))
175 pycompat.bytestr(inst.locker))
176 else:
176 else:
177 reason = _('lock held by %r') % inst.locker
177 reason = _('lock held by %r') % inst.locker
178 ui.error(_("abort: %s: %s\n") % (
178 ui.error(_("abort: %s: %s\n") % (
179 inst.desc or stringutil.forcebytestr(inst.filename), reason))
179 inst.desc or stringutil.forcebytestr(inst.filename), reason))
180 if not inst.locker:
180 if not inst.locker:
181 ui.error(_("(lock might be very busy)\n"))
181 ui.error(_("(lock might be very busy)\n"))
182 except error.LockUnavailable as inst:
182 except error.LockUnavailable as inst:
183 ui.error(_("abort: could not lock %s: %s\n") %
183 ui.error(_("abort: could not lock %s: %s\n") %
184 (inst.desc or stringutil.forcebytestr(inst.filename),
184 (inst.desc or stringutil.forcebytestr(inst.filename),
185 encoding.strtolocal(inst.strerror)))
185 encoding.strtolocal(inst.strerror)))
186 except error.OutOfBandError as inst:
186 except error.OutOfBandError as inst:
187 if inst.args:
187 if inst.args:
188 msg = _("abort: remote error:\n")
188 msg = _("abort: remote error:\n")
189 else:
189 else:
190 msg = _("abort: remote error\n")
190 msg = _("abort: remote error\n")
191 ui.error(msg)
191 ui.error(msg)
192 if inst.args:
192 if inst.args:
193 ui.error(''.join(inst.args))
193 ui.error(''.join(inst.args))
194 if inst.hint:
194 if inst.hint:
195 ui.error('(%s)\n' % inst.hint)
195 ui.error('(%s)\n' % inst.hint)
196 except error.RepoError as inst:
196 except error.RepoError as inst:
197 ui.error(_("abort: %s!\n") % inst)
197 ui.error(_("abort: %s!\n") % inst)
198 if inst.hint:
198 if inst.hint:
199 ui.error(_("(%s)\n") % inst.hint)
199 ui.error(_("(%s)\n") % inst.hint)
200 except error.ResponseError as inst:
200 except error.ResponseError as inst:
201 ui.error(_("abort: %s") % inst.args[0])
201 ui.error(_("abort: %s") % inst.args[0])
202 msg = inst.args[1]
202 msg = inst.args[1]
203 if isinstance(msg, type(u'')):
203 if isinstance(msg, type(u'')):
204 msg = pycompat.sysbytes(msg)
204 msg = pycompat.sysbytes(msg)
205 if not isinstance(msg, bytes):
205 if not isinstance(msg, bytes):
206 ui.error(" %r\n" % (msg,))
206 ui.error(" %r\n" % (msg,))
207 elif not msg:
207 elif not msg:
208 ui.error(_(" empty string\n"))
208 ui.error(_(" empty string\n"))
209 else:
209 else:
210 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
210 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
211 except error.CensoredNodeError as inst:
211 except error.CensoredNodeError as inst:
212 ui.error(_("abort: file censored %s!\n") % inst)
212 ui.error(_("abort: file censored %s!\n") % inst)
213 except error.StorageError as inst:
213 except error.StorageError as inst:
214 ui.error(_("abort: %s!\n") % inst)
214 ui.error(_("abort: %s!\n") % inst)
215 if inst.hint:
216 ui.error(_("(%s)\n") % inst.hint)
215 except error.InterventionRequired as inst:
217 except error.InterventionRequired as inst:
216 ui.error("%s\n" % inst)
218 ui.error("%s\n" % inst)
217 if inst.hint:
219 if inst.hint:
218 ui.error(_("(%s)\n") % inst.hint)
220 ui.error(_("(%s)\n") % inst.hint)
219 return 1
221 return 1
220 except error.WdirUnsupported:
222 except error.WdirUnsupported:
221 ui.error(_("abort: working directory revision cannot be specified\n"))
223 ui.error(_("abort: working directory revision cannot be specified\n"))
222 except error.Abort as inst:
224 except error.Abort as inst:
223 ui.error(_("abort: %s\n") % inst)
225 ui.error(_("abort: %s\n") % inst)
224 if inst.hint:
226 if inst.hint:
225 ui.error(_("(%s)\n") % inst.hint)
227 ui.error(_("(%s)\n") % inst.hint)
226 except ImportError as inst:
228 except ImportError as inst:
227 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
229 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
228 m = stringutil.forcebytestr(inst).split()[-1]
230 m = stringutil.forcebytestr(inst).split()[-1]
229 if m in "mpatch bdiff".split():
231 if m in "mpatch bdiff".split():
230 ui.error(_("(did you forget to compile extensions?)\n"))
232 ui.error(_("(did you forget to compile extensions?)\n"))
231 elif m in "zlib".split():
233 elif m in "zlib".split():
232 ui.error(_("(is your Python install correct?)\n"))
234 ui.error(_("(is your Python install correct?)\n"))
233 except IOError as inst:
235 except IOError as inst:
234 if util.safehasattr(inst, "code"):
236 if util.safehasattr(inst, "code"):
235 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
237 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
236 elif util.safehasattr(inst, "reason"):
238 elif util.safehasattr(inst, "reason"):
237 try: # usually it is in the form (errno, strerror)
239 try: # usually it is in the form (errno, strerror)
238 reason = inst.reason.args[1]
240 reason = inst.reason.args[1]
239 except (AttributeError, IndexError):
241 except (AttributeError, IndexError):
240 # it might be anything, for example a string
242 # it might be anything, for example a string
241 reason = inst.reason
243 reason = inst.reason
242 if isinstance(reason, pycompat.unicode):
244 if isinstance(reason, pycompat.unicode):
243 # SSLError of Python 2.7.9 contains a unicode
245 # SSLError of Python 2.7.9 contains a unicode
244 reason = encoding.unitolocal(reason)
246 reason = encoding.unitolocal(reason)
245 ui.error(_("abort: error: %s\n") % reason)
247 ui.error(_("abort: error: %s\n") % reason)
246 elif (util.safehasattr(inst, "args")
248 elif (util.safehasattr(inst, "args")
247 and inst.args and inst.args[0] == errno.EPIPE):
249 and inst.args and inst.args[0] == errno.EPIPE):
248 pass
250 pass
249 elif getattr(inst, "strerror", None):
251 elif getattr(inst, "strerror", None):
250 if getattr(inst, "filename", None):
252 if getattr(inst, "filename", None):
251 ui.error(_("abort: %s: %s\n") % (
253 ui.error(_("abort: %s: %s\n") % (
252 encoding.strtolocal(inst.strerror),
254 encoding.strtolocal(inst.strerror),
253 stringutil.forcebytestr(inst.filename)))
255 stringutil.forcebytestr(inst.filename)))
254 else:
256 else:
255 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
257 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
256 else:
258 else:
257 raise
259 raise
258 except OSError as inst:
260 except OSError as inst:
259 if getattr(inst, "filename", None) is not None:
261 if getattr(inst, "filename", None) is not None:
260 ui.error(_("abort: %s: '%s'\n") % (
262 ui.error(_("abort: %s: '%s'\n") % (
261 encoding.strtolocal(inst.strerror),
263 encoding.strtolocal(inst.strerror),
262 stringutil.forcebytestr(inst.filename)))
264 stringutil.forcebytestr(inst.filename)))
263 else:
265 else:
264 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
266 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
265 except MemoryError:
267 except MemoryError:
266 ui.error(_("abort: out of memory\n"))
268 ui.error(_("abort: out of memory\n"))
267 except SystemExit as inst:
269 except SystemExit as inst:
268 # Commands shouldn't sys.exit directly, but give a return code.
270 # Commands shouldn't sys.exit directly, but give a return code.
269 # Just in case catch this and and pass exit code to caller.
271 # Just in case catch this and and pass exit code to caller.
270 return inst.code
272 return inst.code
271 except socket.error as inst:
273 except socket.error as inst:
272 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
274 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
273
275
274 return -1
276 return -1
275
277
276 def checknewlabel(repo, lbl, kind):
278 def checknewlabel(repo, lbl, kind):
277 # Do not use the "kind" parameter in ui output.
279 # Do not use the "kind" parameter in ui output.
278 # It makes strings difficult to translate.
280 # It makes strings difficult to translate.
279 if lbl in ['tip', '.', 'null']:
281 if lbl in ['tip', '.', 'null']:
280 raise error.Abort(_("the name '%s' is reserved") % lbl)
282 raise error.Abort(_("the name '%s' is reserved") % lbl)
281 for c in (':', '\0', '\n', '\r'):
283 for c in (':', '\0', '\n', '\r'):
282 if c in lbl:
284 if c in lbl:
283 raise error.Abort(
285 raise error.Abort(
284 _("%r cannot be used in a name") % pycompat.bytestr(c))
286 _("%r cannot be used in a name") % pycompat.bytestr(c))
285 try:
287 try:
286 int(lbl)
288 int(lbl)
287 raise error.Abort(_("cannot use an integer as a name"))
289 raise error.Abort(_("cannot use an integer as a name"))
288 except ValueError:
290 except ValueError:
289 pass
291 pass
290 if lbl.strip() != lbl:
292 if lbl.strip() != lbl:
291 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
293 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
292
294
293 def checkfilename(f):
295 def checkfilename(f):
294 '''Check that the filename f is an acceptable filename for a tracked file'''
296 '''Check that the filename f is an acceptable filename for a tracked file'''
295 if '\r' in f or '\n' in f:
297 if '\r' in f or '\n' in f:
296 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
298 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
297 % pycompat.bytestr(f))
299 % pycompat.bytestr(f))
298
300
299 def checkportable(ui, f):
301 def checkportable(ui, f):
300 '''Check if filename f is portable and warn or abort depending on config'''
302 '''Check if filename f is portable and warn or abort depending on config'''
301 checkfilename(f)
303 checkfilename(f)
302 abort, warn = checkportabilityalert(ui)
304 abort, warn = checkportabilityalert(ui)
303 if abort or warn:
305 if abort or warn:
304 msg = util.checkwinfilename(f)
306 msg = util.checkwinfilename(f)
305 if msg:
307 if msg:
306 msg = "%s: %s" % (msg, procutil.shellquote(f))
308 msg = "%s: %s" % (msg, procutil.shellquote(f))
307 if abort:
309 if abort:
308 raise error.Abort(msg)
310 raise error.Abort(msg)
309 ui.warn(_("warning: %s\n") % msg)
311 ui.warn(_("warning: %s\n") % msg)
310
312
311 def checkportabilityalert(ui):
313 def checkportabilityalert(ui):
312 '''check if the user's config requests nothing, a warning, or abort for
314 '''check if the user's config requests nothing, a warning, or abort for
313 non-portable filenames'''
315 non-portable filenames'''
314 val = ui.config('ui', 'portablefilenames')
316 val = ui.config('ui', 'portablefilenames')
315 lval = val.lower()
317 lval = val.lower()
316 bval = stringutil.parsebool(val)
318 bval = stringutil.parsebool(val)
317 abort = pycompat.iswindows or lval == 'abort'
319 abort = pycompat.iswindows or lval == 'abort'
318 warn = bval or lval == 'warn'
320 warn = bval or lval == 'warn'
319 if bval is None and not (warn or abort or lval == 'ignore'):
321 if bval is None and not (warn or abort or lval == 'ignore'):
320 raise error.ConfigError(
322 raise error.ConfigError(
321 _("ui.portablefilenames value is invalid ('%s')") % val)
323 _("ui.portablefilenames value is invalid ('%s')") % val)
322 return abort, warn
324 return abort, warn
323
325
324 class casecollisionauditor(object):
326 class casecollisionauditor(object):
325 def __init__(self, ui, abort, dirstate):
327 def __init__(self, ui, abort, dirstate):
326 self._ui = ui
328 self._ui = ui
327 self._abort = abort
329 self._abort = abort
328 allfiles = '\0'.join(dirstate._map)
330 allfiles = '\0'.join(dirstate._map)
329 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
331 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
330 self._dirstate = dirstate
332 self._dirstate = dirstate
331 # The purpose of _newfiles is so that we don't complain about
333 # The purpose of _newfiles is so that we don't complain about
332 # case collisions if someone were to call this object with the
334 # case collisions if someone were to call this object with the
333 # same filename twice.
335 # same filename twice.
334 self._newfiles = set()
336 self._newfiles = set()
335
337
336 def __call__(self, f):
338 def __call__(self, f):
337 if f in self._newfiles:
339 if f in self._newfiles:
338 return
340 return
339 fl = encoding.lower(f)
341 fl = encoding.lower(f)
340 if fl in self._loweredfiles and f not in self._dirstate:
342 if fl in self._loweredfiles and f not in self._dirstate:
341 msg = _('possible case-folding collision for %s') % f
343 msg = _('possible case-folding collision for %s') % f
342 if self._abort:
344 if self._abort:
343 raise error.Abort(msg)
345 raise error.Abort(msg)
344 self._ui.warn(_("warning: %s\n") % msg)
346 self._ui.warn(_("warning: %s\n") % msg)
345 self._loweredfiles.add(fl)
347 self._loweredfiles.add(fl)
346 self._newfiles.add(f)
348 self._newfiles.add(f)
347
349
348 def filteredhash(repo, maxrev):
350 def filteredhash(repo, maxrev):
349 """build hash of filtered revisions in the current repoview.
351 """build hash of filtered revisions in the current repoview.
350
352
351 Multiple caches perform up-to-date validation by checking that the
353 Multiple caches perform up-to-date validation by checking that the
352 tiprev and tipnode stored in the cache file match the current repository.
354 tiprev and tipnode stored in the cache file match the current repository.
353 However, this is not sufficient for validating repoviews because the set
355 However, this is not sufficient for validating repoviews because the set
354 of revisions in the view may change without the repository tiprev and
356 of revisions in the view may change without the repository tiprev and
355 tipnode changing.
357 tipnode changing.
356
358
357 This function hashes all the revs filtered from the view and returns
359 This function hashes all the revs filtered from the view and returns
358 that SHA-1 digest.
360 that SHA-1 digest.
359 """
361 """
360 cl = repo.changelog
362 cl = repo.changelog
361 if not cl.filteredrevs:
363 if not cl.filteredrevs:
362 return None
364 return None
363 key = None
365 key = None
364 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
366 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
365 if revs:
367 if revs:
366 s = hashlib.sha1()
368 s = hashlib.sha1()
367 for rev in revs:
369 for rev in revs:
368 s.update('%d;' % rev)
370 s.update('%d;' % rev)
369 key = s.digest()
371 key = s.digest()
370 return key
372 return key
371
373
372 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
374 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
373 '''yield every hg repository under path, always recursively.
375 '''yield every hg repository under path, always recursively.
374 The recurse flag will only control recursion into repo working dirs'''
376 The recurse flag will only control recursion into repo working dirs'''
375 def errhandler(err):
377 def errhandler(err):
376 if err.filename == path:
378 if err.filename == path:
377 raise err
379 raise err
378 samestat = getattr(os.path, 'samestat', None)
380 samestat = getattr(os.path, 'samestat', None)
379 if followsym and samestat is not None:
381 if followsym and samestat is not None:
380 def adddir(dirlst, dirname):
382 def adddir(dirlst, dirname):
381 dirstat = os.stat(dirname)
383 dirstat = os.stat(dirname)
382 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
384 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
383 if not match:
385 if not match:
384 dirlst.append(dirstat)
386 dirlst.append(dirstat)
385 return not match
387 return not match
386 else:
388 else:
387 followsym = False
389 followsym = False
388
390
389 if (seen_dirs is None) and followsym:
391 if (seen_dirs is None) and followsym:
390 seen_dirs = []
392 seen_dirs = []
391 adddir(seen_dirs, path)
393 adddir(seen_dirs, path)
392 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
394 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
393 dirs.sort()
395 dirs.sort()
394 if '.hg' in dirs:
396 if '.hg' in dirs:
395 yield root # found a repository
397 yield root # found a repository
396 qroot = os.path.join(root, '.hg', 'patches')
398 qroot = os.path.join(root, '.hg', 'patches')
397 if os.path.isdir(os.path.join(qroot, '.hg')):
399 if os.path.isdir(os.path.join(qroot, '.hg')):
398 yield qroot # we have a patch queue repo here
400 yield qroot # we have a patch queue repo here
399 if recurse:
401 if recurse:
400 # avoid recursing inside the .hg directory
402 # avoid recursing inside the .hg directory
401 dirs.remove('.hg')
403 dirs.remove('.hg')
402 else:
404 else:
403 dirs[:] = [] # don't descend further
405 dirs[:] = [] # don't descend further
404 elif followsym:
406 elif followsym:
405 newdirs = []
407 newdirs = []
406 for d in dirs:
408 for d in dirs:
407 fname = os.path.join(root, d)
409 fname = os.path.join(root, d)
408 if adddir(seen_dirs, fname):
410 if adddir(seen_dirs, fname):
409 if os.path.islink(fname):
411 if os.path.islink(fname):
410 for hgname in walkrepos(fname, True, seen_dirs):
412 for hgname in walkrepos(fname, True, seen_dirs):
411 yield hgname
413 yield hgname
412 else:
414 else:
413 newdirs.append(d)
415 newdirs.append(d)
414 dirs[:] = newdirs
416 dirs[:] = newdirs
415
417
416 def binnode(ctx):
418 def binnode(ctx):
417 """Return binary node id for a given basectx"""
419 """Return binary node id for a given basectx"""
418 node = ctx.node()
420 node = ctx.node()
419 if node is None:
421 if node is None:
420 return wdirid
422 return wdirid
421 return node
423 return node
422
424
423 def intrev(ctx):
425 def intrev(ctx):
424 """Return integer for a given basectx that can be used in comparison or
426 """Return integer for a given basectx that can be used in comparison or
425 arithmetic operation"""
427 arithmetic operation"""
426 rev = ctx.rev()
428 rev = ctx.rev()
427 if rev is None:
429 if rev is None:
428 return wdirrev
430 return wdirrev
429 return rev
431 return rev
430
432
431 def formatchangeid(ctx):
433 def formatchangeid(ctx):
432 """Format changectx as '{rev}:{node|formatnode}', which is the default
434 """Format changectx as '{rev}:{node|formatnode}', which is the default
433 template provided by logcmdutil.changesettemplater"""
435 template provided by logcmdutil.changesettemplater"""
434 repo = ctx.repo()
436 repo = ctx.repo()
435 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
437 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
436
438
437 def formatrevnode(ui, rev, node):
439 def formatrevnode(ui, rev, node):
438 """Format given revision and node depending on the current verbosity"""
440 """Format given revision and node depending on the current verbosity"""
439 if ui.debugflag:
441 if ui.debugflag:
440 hexfunc = hex
442 hexfunc = hex
441 else:
443 else:
442 hexfunc = short
444 hexfunc = short
443 return '%d:%s' % (rev, hexfunc(node))
445 return '%d:%s' % (rev, hexfunc(node))
444
446
445 def resolvehexnodeidprefix(repo, prefix):
447 def resolvehexnodeidprefix(repo, prefix):
446 if (prefix.startswith('x') and
448 if (prefix.startswith('x') and
447 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
449 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
448 prefix = prefix[1:]
450 prefix = prefix[1:]
449 try:
451 try:
450 # Uses unfiltered repo because it's faster when prefix is ambiguous/
452 # Uses unfiltered repo because it's faster when prefix is ambiguous/
451 # This matches the shortesthexnodeidprefix() function below.
453 # This matches the shortesthexnodeidprefix() function below.
452 node = repo.unfiltered().changelog._partialmatch(prefix)
454 node = repo.unfiltered().changelog._partialmatch(prefix)
453 except error.AmbiguousPrefixLookupError:
455 except error.AmbiguousPrefixLookupError:
454 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
456 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
455 if revset:
457 if revset:
456 # Clear config to avoid infinite recursion
458 # Clear config to avoid infinite recursion
457 configoverrides = {('experimental',
459 configoverrides = {('experimental',
458 'revisions.disambiguatewithin'): None}
460 'revisions.disambiguatewithin'): None}
459 with repo.ui.configoverride(configoverrides):
461 with repo.ui.configoverride(configoverrides):
460 revs = repo.anyrevs([revset], user=True)
462 revs = repo.anyrevs([revset], user=True)
461 matches = []
463 matches = []
462 for rev in revs:
464 for rev in revs:
463 node = repo.changelog.node(rev)
465 node = repo.changelog.node(rev)
464 if hex(node).startswith(prefix):
466 if hex(node).startswith(prefix):
465 matches.append(node)
467 matches.append(node)
466 if len(matches) == 1:
468 if len(matches) == 1:
467 return matches[0]
469 return matches[0]
468 raise
470 raise
469 if node is None:
471 if node is None:
470 return
472 return
471 repo.changelog.rev(node) # make sure node isn't filtered
473 repo.changelog.rev(node) # make sure node isn't filtered
472 return node
474 return node
473
475
474 def mayberevnum(repo, prefix):
476 def mayberevnum(repo, prefix):
475 """Checks if the given prefix may be mistaken for a revision number"""
477 """Checks if the given prefix may be mistaken for a revision number"""
476 try:
478 try:
477 i = int(prefix)
479 i = int(prefix)
478 # if we are a pure int, then starting with zero will not be
480 # if we are a pure int, then starting with zero will not be
479 # confused as a rev; or, obviously, if the int is larger
481 # confused as a rev; or, obviously, if the int is larger
480 # than the value of the tip rev. We still need to disambiguate if
482 # than the value of the tip rev. We still need to disambiguate if
481 # prefix == '0', since that *is* a valid revnum.
483 # prefix == '0', since that *is* a valid revnum.
482 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
484 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
483 return False
485 return False
484 return True
486 return True
485 except ValueError:
487 except ValueError:
486 return False
488 return False
487
489
488 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
490 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
489 """Find the shortest unambiguous prefix that matches hexnode.
491 """Find the shortest unambiguous prefix that matches hexnode.
490
492
491 If "cache" is not None, it must be a dictionary that can be used for
493 If "cache" is not None, it must be a dictionary that can be used for
492 caching between calls to this method.
494 caching between calls to this method.
493 """
495 """
494 # _partialmatch() of filtered changelog could take O(len(repo)) time,
496 # _partialmatch() of filtered changelog could take O(len(repo)) time,
495 # which would be unacceptably slow. so we look for hash collision in
497 # which would be unacceptably slow. so we look for hash collision in
496 # unfiltered space, which means some hashes may be slightly longer.
498 # unfiltered space, which means some hashes may be slightly longer.
497
499
498 minlength=max(minlength, 1)
500 minlength=max(minlength, 1)
499
501
500 def disambiguate(prefix):
502 def disambiguate(prefix):
501 """Disambiguate against revnums."""
503 """Disambiguate against revnums."""
502 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
504 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
503 if mayberevnum(repo, prefix):
505 if mayberevnum(repo, prefix):
504 return 'x' + prefix
506 return 'x' + prefix
505 else:
507 else:
506 return prefix
508 return prefix
507
509
508 hexnode = hex(node)
510 hexnode = hex(node)
509 for length in range(len(prefix), len(hexnode) + 1):
511 for length in range(len(prefix), len(hexnode) + 1):
510 prefix = hexnode[:length]
512 prefix = hexnode[:length]
511 if not mayberevnum(repo, prefix):
513 if not mayberevnum(repo, prefix):
512 return prefix
514 return prefix
513
515
514 cl = repo.unfiltered().changelog
516 cl = repo.unfiltered().changelog
515 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
517 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
516 if revset:
518 if revset:
517 revs = None
519 revs = None
518 if cache is not None:
520 if cache is not None:
519 revs = cache.get('disambiguationrevset')
521 revs = cache.get('disambiguationrevset')
520 if revs is None:
522 if revs is None:
521 revs = repo.anyrevs([revset], user=True)
523 revs = repo.anyrevs([revset], user=True)
522 if cache is not None:
524 if cache is not None:
523 cache['disambiguationrevset'] = revs
525 cache['disambiguationrevset'] = revs
524 if cl.rev(node) in revs:
526 if cl.rev(node) in revs:
525 hexnode = hex(node)
527 hexnode = hex(node)
526 nodetree = None
528 nodetree = None
527 if cache is not None:
529 if cache is not None:
528 nodetree = cache.get('disambiguationnodetree')
530 nodetree = cache.get('disambiguationnodetree')
529 if not nodetree:
531 if not nodetree:
530 try:
532 try:
531 nodetree = parsers.nodetree(cl.index, len(revs))
533 nodetree = parsers.nodetree(cl.index, len(revs))
532 except AttributeError:
534 except AttributeError:
533 # no native nodetree
535 # no native nodetree
534 pass
536 pass
535 else:
537 else:
536 for r in revs:
538 for r in revs:
537 nodetree.insert(r)
539 nodetree.insert(r)
538 if cache is not None:
540 if cache is not None:
539 cache['disambiguationnodetree'] = nodetree
541 cache['disambiguationnodetree'] = nodetree
540 if nodetree is not None:
542 if nodetree is not None:
541 length = max(nodetree.shortest(node), minlength)
543 length = max(nodetree.shortest(node), minlength)
542 prefix = hexnode[:length]
544 prefix = hexnode[:length]
543 return disambiguate(prefix)
545 return disambiguate(prefix)
544 for length in range(minlength, len(hexnode) + 1):
546 for length in range(minlength, len(hexnode) + 1):
545 matches = []
547 matches = []
546 prefix = hexnode[:length]
548 prefix = hexnode[:length]
547 for rev in revs:
549 for rev in revs:
548 otherhexnode = repo[rev].hex()
550 otherhexnode = repo[rev].hex()
549 if prefix == otherhexnode[:length]:
551 if prefix == otherhexnode[:length]:
550 matches.append(otherhexnode)
552 matches.append(otherhexnode)
551 if len(matches) == 1:
553 if len(matches) == 1:
552 return disambiguate(prefix)
554 return disambiguate(prefix)
553
555
554 try:
556 try:
555 return disambiguate(cl.shortest(node, minlength))
557 return disambiguate(cl.shortest(node, minlength))
556 except error.LookupError:
558 except error.LookupError:
557 raise error.RepoLookupError()
559 raise error.RepoLookupError()
558
560
559 def isrevsymbol(repo, symbol):
561 def isrevsymbol(repo, symbol):
560 """Checks if a symbol exists in the repo.
562 """Checks if a symbol exists in the repo.
561
563
562 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
564 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
563 symbol is an ambiguous nodeid prefix.
565 symbol is an ambiguous nodeid prefix.
564 """
566 """
565 try:
567 try:
566 revsymbol(repo, symbol)
568 revsymbol(repo, symbol)
567 return True
569 return True
568 except error.RepoLookupError:
570 except error.RepoLookupError:
569 return False
571 return False
570
572
571 def revsymbol(repo, symbol):
573 def revsymbol(repo, symbol):
572 """Returns a context given a single revision symbol (as string).
574 """Returns a context given a single revision symbol (as string).
573
575
574 This is similar to revsingle(), but accepts only a single revision symbol,
576 This is similar to revsingle(), but accepts only a single revision symbol,
575 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
577 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
576 not "max(public())".
578 not "max(public())".
577 """
579 """
578 if not isinstance(symbol, bytes):
580 if not isinstance(symbol, bytes):
579 msg = ("symbol (%s of type %s) was not a string, did you mean "
581 msg = ("symbol (%s of type %s) was not a string, did you mean "
580 "repo[symbol]?" % (symbol, type(symbol)))
582 "repo[symbol]?" % (symbol, type(symbol)))
581 raise error.ProgrammingError(msg)
583 raise error.ProgrammingError(msg)
582 try:
584 try:
583 if symbol in ('.', 'tip', 'null'):
585 if symbol in ('.', 'tip', 'null'):
584 return repo[symbol]
586 return repo[symbol]
585
587
586 try:
588 try:
587 r = int(symbol)
589 r = int(symbol)
588 if '%d' % r != symbol:
590 if '%d' % r != symbol:
589 raise ValueError
591 raise ValueError
590 l = len(repo.changelog)
592 l = len(repo.changelog)
591 if r < 0:
593 if r < 0:
592 r += l
594 r += l
593 if r < 0 or r >= l and r != wdirrev:
595 if r < 0 or r >= l and r != wdirrev:
594 raise ValueError
596 raise ValueError
595 return repo[r]
597 return repo[r]
596 except error.FilteredIndexError:
598 except error.FilteredIndexError:
597 raise
599 raise
598 except (ValueError, OverflowError, IndexError):
600 except (ValueError, OverflowError, IndexError):
599 pass
601 pass
600
602
601 if len(symbol) == 40:
603 if len(symbol) == 40:
602 try:
604 try:
603 node = bin(symbol)
605 node = bin(symbol)
604 rev = repo.changelog.rev(node)
606 rev = repo.changelog.rev(node)
605 return repo[rev]
607 return repo[rev]
606 except error.FilteredLookupError:
608 except error.FilteredLookupError:
607 raise
609 raise
608 except (TypeError, LookupError):
610 except (TypeError, LookupError):
609 pass
611 pass
610
612
611 # look up bookmarks through the name interface
613 # look up bookmarks through the name interface
612 try:
614 try:
613 node = repo.names.singlenode(repo, symbol)
615 node = repo.names.singlenode(repo, symbol)
614 rev = repo.changelog.rev(node)
616 rev = repo.changelog.rev(node)
615 return repo[rev]
617 return repo[rev]
616 except KeyError:
618 except KeyError:
617 pass
619 pass
618
620
619 node = resolvehexnodeidprefix(repo, symbol)
621 node = resolvehexnodeidprefix(repo, symbol)
620 if node is not None:
622 if node is not None:
621 rev = repo.changelog.rev(node)
623 rev = repo.changelog.rev(node)
622 return repo[rev]
624 return repo[rev]
623
625
624 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
626 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
625
627
626 except error.WdirUnsupported:
628 except error.WdirUnsupported:
627 return repo[None]
629 return repo[None]
628 except (error.FilteredIndexError, error.FilteredLookupError,
630 except (error.FilteredIndexError, error.FilteredLookupError,
629 error.FilteredRepoLookupError):
631 error.FilteredRepoLookupError):
630 raise _filterederror(repo, symbol)
632 raise _filterederror(repo, symbol)
631
633
632 def _filterederror(repo, changeid):
634 def _filterederror(repo, changeid):
633 """build an exception to be raised about a filtered changeid
635 """build an exception to be raised about a filtered changeid
634
636
635 This is extracted in a function to help extensions (eg: evolve) to
637 This is extracted in a function to help extensions (eg: evolve) to
636 experiment with various message variants."""
638 experiment with various message variants."""
637 if repo.filtername.startswith('visible'):
639 if repo.filtername.startswith('visible'):
638
640
639 # Check if the changeset is obsolete
641 # Check if the changeset is obsolete
640 unfilteredrepo = repo.unfiltered()
642 unfilteredrepo = repo.unfiltered()
641 ctx = revsymbol(unfilteredrepo, changeid)
643 ctx = revsymbol(unfilteredrepo, changeid)
642
644
643 # If the changeset is obsolete, enrich the message with the reason
645 # If the changeset is obsolete, enrich the message with the reason
644 # that made this changeset not visible
646 # that made this changeset not visible
645 if ctx.obsolete():
647 if ctx.obsolete():
646 msg = obsutil._getfilteredreason(repo, changeid, ctx)
648 msg = obsutil._getfilteredreason(repo, changeid, ctx)
647 else:
649 else:
648 msg = _("hidden revision '%s'") % changeid
650 msg = _("hidden revision '%s'") % changeid
649
651
650 hint = _('use --hidden to access hidden revisions')
652 hint = _('use --hidden to access hidden revisions')
651
653
652 return error.FilteredRepoLookupError(msg, hint=hint)
654 return error.FilteredRepoLookupError(msg, hint=hint)
653 msg = _("filtered revision '%s' (not in '%s' subset)")
655 msg = _("filtered revision '%s' (not in '%s' subset)")
654 msg %= (changeid, repo.filtername)
656 msg %= (changeid, repo.filtername)
655 return error.FilteredRepoLookupError(msg)
657 return error.FilteredRepoLookupError(msg)
656
658
657 def revsingle(repo, revspec, default='.', localalias=None):
659 def revsingle(repo, revspec, default='.', localalias=None):
658 if not revspec and revspec != 0:
660 if not revspec and revspec != 0:
659 return repo[default]
661 return repo[default]
660
662
661 l = revrange(repo, [revspec], localalias=localalias)
663 l = revrange(repo, [revspec], localalias=localalias)
662 if not l:
664 if not l:
663 raise error.Abort(_('empty revision set'))
665 raise error.Abort(_('empty revision set'))
664 return repo[l.last()]
666 return repo[l.last()]
665
667
666 def _pairspec(revspec):
668 def _pairspec(revspec):
667 tree = revsetlang.parse(revspec)
669 tree = revsetlang.parse(revspec)
668 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
670 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
669
671
670 def revpair(repo, revs):
672 def revpair(repo, revs):
671 if not revs:
673 if not revs:
672 return repo['.'], repo[None]
674 return repo['.'], repo[None]
673
675
674 l = revrange(repo, revs)
676 l = revrange(repo, revs)
675
677
676 if not l:
678 if not l:
677 first = second = None
679 first = second = None
678 elif l.isascending():
680 elif l.isascending():
679 first = l.min()
681 first = l.min()
680 second = l.max()
682 second = l.max()
681 elif l.isdescending():
683 elif l.isdescending():
682 first = l.max()
684 first = l.max()
683 second = l.min()
685 second = l.min()
684 else:
686 else:
685 first = l.first()
687 first = l.first()
686 second = l.last()
688 second = l.last()
687
689
688 if first is None:
690 if first is None:
689 raise error.Abort(_('empty revision range'))
691 raise error.Abort(_('empty revision range'))
690 if (first == second and len(revs) >= 2
692 if (first == second and len(revs) >= 2
691 and not all(revrange(repo, [r]) for r in revs)):
693 and not all(revrange(repo, [r]) for r in revs)):
692 raise error.Abort(_('empty revision on one side of range'))
694 raise error.Abort(_('empty revision on one side of range'))
693
695
694 # if top-level is range expression, the result must always be a pair
696 # if top-level is range expression, the result must always be a pair
695 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
697 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
696 return repo[first], repo[None]
698 return repo[first], repo[None]
697
699
698 return repo[first], repo[second]
700 return repo[first], repo[second]
699
701
700 def revrange(repo, specs, localalias=None):
702 def revrange(repo, specs, localalias=None):
701 """Execute 1 to many revsets and return the union.
703 """Execute 1 to many revsets and return the union.
702
704
703 This is the preferred mechanism for executing revsets using user-specified
705 This is the preferred mechanism for executing revsets using user-specified
704 config options, such as revset aliases.
706 config options, such as revset aliases.
705
707
706 The revsets specified by ``specs`` will be executed via a chained ``OR``
708 The revsets specified by ``specs`` will be executed via a chained ``OR``
707 expression. If ``specs`` is empty, an empty result is returned.
709 expression. If ``specs`` is empty, an empty result is returned.
708
710
709 ``specs`` can contain integers, in which case they are assumed to be
711 ``specs`` can contain integers, in which case they are assumed to be
710 revision numbers.
712 revision numbers.
711
713
712 It is assumed the revsets are already formatted. If you have arguments
714 It is assumed the revsets are already formatted. If you have arguments
713 that need to be expanded in the revset, call ``revsetlang.formatspec()``
715 that need to be expanded in the revset, call ``revsetlang.formatspec()``
714 and pass the result as an element of ``specs``.
716 and pass the result as an element of ``specs``.
715
717
716 Specifying a single revset is allowed.
718 Specifying a single revset is allowed.
717
719
718 Returns a ``revset.abstractsmartset`` which is a list-like interface over
720 Returns a ``revset.abstractsmartset`` which is a list-like interface over
719 integer revisions.
721 integer revisions.
720 """
722 """
721 allspecs = []
723 allspecs = []
722 for spec in specs:
724 for spec in specs:
723 if isinstance(spec, int):
725 if isinstance(spec, int):
724 spec = revsetlang.formatspec('rev(%d)', spec)
726 spec = revsetlang.formatspec('rev(%d)', spec)
725 allspecs.append(spec)
727 allspecs.append(spec)
726 return repo.anyrevs(allspecs, user=True, localalias=localalias)
728 return repo.anyrevs(allspecs, user=True, localalias=localalias)
727
729
728 def meaningfulparents(repo, ctx):
730 def meaningfulparents(repo, ctx):
729 """Return list of meaningful (or all if debug) parentrevs for rev.
731 """Return list of meaningful (or all if debug) parentrevs for rev.
730
732
731 For merges (two non-nullrev revisions) both parents are meaningful.
733 For merges (two non-nullrev revisions) both parents are meaningful.
732 Otherwise the first parent revision is considered meaningful if it
734 Otherwise the first parent revision is considered meaningful if it
733 is not the preceding revision.
735 is not the preceding revision.
734 """
736 """
735 parents = ctx.parents()
737 parents = ctx.parents()
736 if len(parents) > 1:
738 if len(parents) > 1:
737 return parents
739 return parents
738 if repo.ui.debugflag:
740 if repo.ui.debugflag:
739 return [parents[0], repo[nullrev]]
741 return [parents[0], repo[nullrev]]
740 if parents[0].rev() >= intrev(ctx) - 1:
742 if parents[0].rev() >= intrev(ctx) - 1:
741 return []
743 return []
742 return parents
744 return parents
743
745
744 def expandpats(pats):
746 def expandpats(pats):
745 '''Expand bare globs when running on windows.
747 '''Expand bare globs when running on windows.
746 On posix we assume it already has already been done by sh.'''
748 On posix we assume it already has already been done by sh.'''
747 if not util.expandglobs:
749 if not util.expandglobs:
748 return list(pats)
750 return list(pats)
749 ret = []
751 ret = []
750 for kindpat in pats:
752 for kindpat in pats:
751 kind, pat = matchmod._patsplit(kindpat, None)
753 kind, pat = matchmod._patsplit(kindpat, None)
752 if kind is None:
754 if kind is None:
753 try:
755 try:
754 globbed = glob.glob(pat)
756 globbed = glob.glob(pat)
755 except re.error:
757 except re.error:
756 globbed = [pat]
758 globbed = [pat]
757 if globbed:
759 if globbed:
758 ret.extend(globbed)
760 ret.extend(globbed)
759 continue
761 continue
760 ret.append(kindpat)
762 ret.append(kindpat)
761 return ret
763 return ret
762
764
763 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
765 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
764 badfn=None):
766 badfn=None):
765 '''Return a matcher and the patterns that were used.
767 '''Return a matcher and the patterns that were used.
766 The matcher will warn about bad matches, unless an alternate badfn callback
768 The matcher will warn about bad matches, unless an alternate badfn callback
767 is provided.'''
769 is provided.'''
768 if pats == ("",):
770 if pats == ("",):
769 pats = []
771 pats = []
770 if opts is None:
772 if opts is None:
771 opts = {}
773 opts = {}
772 if not globbed and default == 'relpath':
774 if not globbed and default == 'relpath':
773 pats = expandpats(pats or [])
775 pats = expandpats(pats or [])
774
776
775 def bad(f, msg):
777 def bad(f, msg):
776 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
778 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
777
779
778 if badfn is None:
780 if badfn is None:
779 badfn = bad
781 badfn = bad
780
782
781 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
783 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
782 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
784 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
783
785
784 if m.always():
786 if m.always():
785 pats = []
787 pats = []
786 return m, pats
788 return m, pats
787
789
788 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
790 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
789 badfn=None):
791 badfn=None):
790 '''Return a matcher that will warn about bad matches.'''
792 '''Return a matcher that will warn about bad matches.'''
791 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
793 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
792
794
793 def matchall(repo):
795 def matchall(repo):
794 '''Return a matcher that will efficiently match everything.'''
796 '''Return a matcher that will efficiently match everything.'''
795 return matchmod.always(repo.root, repo.getcwd())
797 return matchmod.always(repo.root, repo.getcwd())
796
798
797 def matchfiles(repo, files, badfn=None):
799 def matchfiles(repo, files, badfn=None):
798 '''Return a matcher that will efficiently match exactly these files.'''
800 '''Return a matcher that will efficiently match exactly these files.'''
799 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
801 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
800
802
801 def parsefollowlinespattern(repo, rev, pat, msg):
803 def parsefollowlinespattern(repo, rev, pat, msg):
802 """Return a file name from `pat` pattern suitable for usage in followlines
804 """Return a file name from `pat` pattern suitable for usage in followlines
803 logic.
805 logic.
804 """
806 """
805 if not matchmod.patkind(pat):
807 if not matchmod.patkind(pat):
806 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
808 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
807 else:
809 else:
808 ctx = repo[rev]
810 ctx = repo[rev]
809 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
811 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
810 files = [f for f in ctx if m(f)]
812 files = [f for f in ctx if m(f)]
811 if len(files) != 1:
813 if len(files) != 1:
812 raise error.ParseError(msg)
814 raise error.ParseError(msg)
813 return files[0]
815 return files[0]
814
816
815 def origpath(ui, repo, filepath):
817 def origpath(ui, repo, filepath):
816 '''customize where .orig files are created
818 '''customize where .orig files are created
817
819
818 Fetch user defined path from config file: [ui] origbackuppath = <path>
820 Fetch user defined path from config file: [ui] origbackuppath = <path>
819 Fall back to default (filepath with .orig suffix) if not specified
821 Fall back to default (filepath with .orig suffix) if not specified
820 '''
822 '''
821 origbackuppath = ui.config('ui', 'origbackuppath')
823 origbackuppath = ui.config('ui', 'origbackuppath')
822 if not origbackuppath:
824 if not origbackuppath:
823 return filepath + ".orig"
825 return filepath + ".orig"
824
826
825 # Convert filepath from an absolute path into a path inside the repo.
827 # Convert filepath from an absolute path into a path inside the repo.
826 filepathfromroot = util.normpath(os.path.relpath(filepath,
828 filepathfromroot = util.normpath(os.path.relpath(filepath,
827 start=repo.root))
829 start=repo.root))
828
830
829 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
831 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
830 origbackupdir = origvfs.dirname(filepathfromroot)
832 origbackupdir = origvfs.dirname(filepathfromroot)
831 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
833 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
832 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
834 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
833
835
834 # Remove any files that conflict with the backup file's path
836 # Remove any files that conflict with the backup file's path
835 for f in reversed(list(util.finddirs(filepathfromroot))):
837 for f in reversed(list(util.finddirs(filepathfromroot))):
836 if origvfs.isfileorlink(f):
838 if origvfs.isfileorlink(f):
837 ui.note(_('removing conflicting file: %s\n')
839 ui.note(_('removing conflicting file: %s\n')
838 % origvfs.join(f))
840 % origvfs.join(f))
839 origvfs.unlink(f)
841 origvfs.unlink(f)
840 break
842 break
841
843
842 origvfs.makedirs(origbackupdir)
844 origvfs.makedirs(origbackupdir)
843
845
844 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
846 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
845 ui.note(_('removing conflicting directory: %s\n')
847 ui.note(_('removing conflicting directory: %s\n')
846 % origvfs.join(filepathfromroot))
848 % origvfs.join(filepathfromroot))
847 origvfs.rmtree(filepathfromroot, forcibly=True)
849 origvfs.rmtree(filepathfromroot, forcibly=True)
848
850
849 return origvfs.join(filepathfromroot)
851 return origvfs.join(filepathfromroot)
850
852
851 class _containsnode(object):
853 class _containsnode(object):
852 """proxy __contains__(node) to container.__contains__ which accepts revs"""
854 """proxy __contains__(node) to container.__contains__ which accepts revs"""
853
855
854 def __init__(self, repo, revcontainer):
856 def __init__(self, repo, revcontainer):
855 self._torev = repo.changelog.rev
857 self._torev = repo.changelog.rev
856 self._revcontains = revcontainer.__contains__
858 self._revcontains = revcontainer.__contains__
857
859
858 def __contains__(self, node):
860 def __contains__(self, node):
859 return self._revcontains(self._torev(node))
861 return self._revcontains(self._torev(node))
860
862
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non-tuple "source" keys to tuples for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with the biggest rev number
                newnode = next(unfi.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                      allreplaced))
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order; that might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)

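# Illustrative sketch (not part of the original module): a history-rewriting
# command such as rebase might report its results to cleanupnodes() roughly
# like this, where 'oldnode' and 'newnode' are hypothetical placeholders:
#
#     replacements = {(oldnode,): (newnode,)}
#     cleanupnodes(repo, replacements, 'rebase', fixphase=True)
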
def addremove(repo, matcher, prefix, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
                label = 'ui.addremove.added'
            else:
                status = _('removing %s\n') % m.uipath(abs)
                label = 'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

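# Illustrative sketch (assumption, not original code): 'hg addremove -s 50'
# reaches this helper roughly as follows, with the similarity passed as a
# 0-100 value in opts:
#
#     m = matchmod.always(repo.root, repo.getcwd())
#     addremove(repo, m, prefix='', opts={'similarity': '50'})
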
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def writerequires(opener, requirements):
    with opener('requires', 'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)

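# The resulting .hg/requires file lists one requirement per line, sorted,
# for example:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store
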
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used directly, as it has been
    set in the instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

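# Illustrative sketch (assumption, not original code): a subclass supplies
# join(), after which a method can be declared as a property cached against
# a file under .hg/:
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class somerepo(object):
#         @repofilecache('bookmarks')
#         def bookmarks(self):
#             # recomputed only when .hg/bookmarks changes on disk
#             return readbookmarks(self)
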
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data

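# Illustrative sketch (assumption, not original code): given an hgrc section
# such as
#
#     [extdata]
#     bugrefs = shell:cat bugs.txt
#
# where bugs.txt contains "<revspec> <value>" lines, extdatasource(repo,
# 'bugrefs') returns a {rev: value} mapping for locally-known revisions.
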
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

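# Illustrative sketch (assumption, not original code): spawning a child hg
# process that needs to reacquire the already-held working copy lock:
#
#     with repo.wlock():
#         rc = wlocksub(repo, 'hg update tip')
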
class progress(object):
    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item="", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.ui.progress(self.topic, None)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit,
                         self.total)

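# Illustrative sketch (assumption, not original code): typical use is as a
# context manager, so the progress bar is always cleared on exit:
#
#     with progress(ui, _('scanning'), unit=_('files'),
#                   total=len(files)) as p:
#         for f in files:
#             p.increment(item=f)
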
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta')
            or ui.configbool('format', 'usegeneraldelta')
            or ui.configbool('format', 'sparse-revlog'))

def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')

class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))

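# Example on-disk layout (illustrative): write({'oldtiprev': '42',
# 'topic': 'feature'}, firstline='v1') produces
#
#     v1
#     oldtiprev=42
#     topic=feature
#
# and read(firstlinenonkeyval=True) returns
# {'__firstline': 'v1', 'oldtiprev': '42', 'topic': 'feature'}.
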
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

_reportnewcssource = [
    'pull',
    'unbundle',
]

def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

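# Illustrative sketch (assumption, not original code): an extension that
# fetches file contents lazily can register itself like so:
#
#     def _prefetch(repo, revs, match):
#         ...  # ensure files matched in revs are available locally
#
#     fileprefetchhooks.add('myextension', _prefetch)
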
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches only
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

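# Illustrative sketch (assumption, not original code): a caller would
# typically run this check from a transaction validator so that offending
# transactions are aborted before they commit:
#
#     tr.addvalidator('010-singlehead',
#                     lambda tr: enforcesinglehead(repo, tr, desc))
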
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

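# A hedged usage sketch (illustrative, not from this file): a write command
# resolving user-supplied revisions could pin hidden changesets first and then
# resolve the specs as usual; revrange() is defined earlier in this module.
def _exampleresolvewithhidden(ui, repo, revspecs):
    repo = unhidehashlikerevs(repo, revspecs, 'warn')
    return repo, revrange(repo, revspecs)
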
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

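# A hedged illustration (not in the original file): symbols are the bare
# hash-like tokens a revset parse yields, e.g. '25' or 'c90a05'.  Decimal
# revnums are only honoured when experimental.directaccess.revnums is set;
# hex prefixes resolve through resolvehexnodeidprefix() above.  The sample
# symbols here are made up.
def _exampleprobesymbols(repo):
    return _getrevsfromsymbols(repo, {'25', 'c90a05'})
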
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
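
# A hedged usage sketch (illustrative): strip-like code can use bookmarkrevs()
# to find the changesets a bookmark exclusively "owns", i.e. those that would
# become unreachable if the bookmark line were removed.
def _examplebookmarkowned(repo, mark):
    # repo.revs() returns a smartset of revision numbers
    return [repo[r].node() for r in bookmarkrevs(repo, mark)]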