##// END OF EJS Templates
scmutil: expand long "one-liner"...
Boris Feld -
r39926:1c3f1491 default
parent child Browse files
Show More
@@ -1,1774 +1,1776 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 nullid,
23 nullid,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirrev,
26 wdirrev,
27 )
27 )
28
28
29 from . import (
29 from . import (
30 encoding,
30 encoding,
31 error,
31 error,
32 match as matchmod,
32 match as matchmod,
33 obsolete,
33 obsolete,
34 obsutil,
34 obsutil,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 policy,
37 policy,
38 pycompat,
38 pycompat,
39 revsetlang,
39 revsetlang,
40 similar,
40 similar,
41 url,
41 url,
42 util,
42 util,
43 vfs,
43 vfs,
44 )
44 )
45
45
46 from .utils import (
46 from .utils import (
47 procutil,
47 procutil,
48 stringutil,
48 stringutil,
49 )
49 )
50
50
51 if pycompat.iswindows:
51 if pycompat.iswindows:
52 from . import scmwindows as scmplatform
52 from . import scmwindows as scmplatform
53 else:
53 else:
54 from . import scmposix as scmplatform
54 from . import scmposix as scmplatform
55
55
56 parsers = policy.importmod(r'parsers')
56 parsers = policy.importmod(r'parsers')
57
57
58 termsize = scmplatform.termsize
58 termsize = scmplatform.termsize
59
59
class status(tuple):
    '''Tuple subclass carrying one list of files per dirstate status.

    The 'deleted', 'unknown' and 'ignored' properties are only relevant
    to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        items = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, items)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        fields = tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
        template = (r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                    r'unknown=%s, ignored=%s, clean=%s>')
        return template % fields
113
113
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a (subpath -> ctx) mapping, preferring subpaths from ctx1.
    # The ctx2 entries matter when the .hgsub file has been modified in
    # ctx2 but is not yet committed in ctx1.
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            missing.add(subpath)
            del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Anything only present in ctx2 is yielded as an empty subrepo based
    # on ctx1, so that 'sub.{status|diff}(rev2)' gives an accurate result
    # instead of comparing the ctx2 subrepo against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
138
138
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count excluded nodes that are secret (and alive) — they explain why
    # nothing was exchanged even though changesets exist.
    secretlist = []
    for n in excluded or []:
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
155
155
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # Print the traceback (when enabled) before the outer handlers
            # turn the exception into a user-facing message.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # Normalize a unicode payload to bytes before deciding how to
        # display it.
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        # Intervention required is not a hard failure: exit code 1.
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        # Last word of the message is usually the missing module name.
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            # HTTP-style error objects carry a "code" attribute.
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            # URLError-style objects carry a "reason" attribute.
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # Broken pipe is silently ignored (e.g. output piped to head).
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.error(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.error(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    return -1
272
272
def checknewlabel(repo, lbl, kind):
    """Abort if lbl is not usable as a new label (bookmark/branch/etc.).

    The "kind" parameter is deliberately not used in ui output, because
    that would make the strings difficult to translate.
    """
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(forbidden))
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
289
289
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in ('\r', '\n')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                          % pycompat.bytestr(f))
295
295
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
307
307
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    raw = ui.config('ui', 'portablefilenames')
    lowered = raw.lower()
    parsed = stringutil.parsebool(raw)
    # On Windows non-portable names always abort.
    abort = pycompat.iswindows or lowered == 'abort'
    warn = parsed or lowered == 'warn'
    recognized = warn or abort or lowered == 'ignore'
    if parsed is None and not recognized:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % raw)
    return abort, warn
320
320
class casecollisionauditor(object):
    '''Warn or abort when newly-added filenames would case-fold-collide
    with files already tracked in the dirstate (which would break
    checkouts on case-insensitive filesystems).
    '''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # When True, a detected collision raises; otherwise only warns.
        self._abort = abort
        # Lower-case every tracked name in a single encoding.lower() call
        # by joining/splitting on NUL (not valid in filenames).
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        '''Audit filename f; warn or abort on a case-folding collision.'''
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # A file already in the dirstate under this exact name is not a
        # collision with itself.
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
344
344
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    # Only revisions up to maxrev participate in the key.
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%d;' % rev)
    return s.digest()
368
368
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only an error on the top-level path is fatal; errors in
        # subdirectories are silently skipped by os.walk().
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Record dirname's stat in dirlst and return True if it was
            # not already seen (protects against symlink cycles).
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # Without samestat we cannot detect cycles, so disable symlink
        # following entirely.
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Walk through the link with the shared seen_dirs
                        # list so already-visited directories are skipped.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
412
412
def binnode(ctx):
    """Return binary node id for a given basectx"""
    # The working directory has no node of its own; use the sentinel.
    node = ctx.node()
    return wdirid if node is None else node
419
419
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    # The working directory has no revnum; use the sentinel.
    rev = ctx.rev()
    return wdirrev if rev is None else rev
427
427
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
433
433
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Full hash in debug mode, abbreviated hash otherwise.
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
441
441
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a hex nodeid prefix to a full binary node.

    Returns None if nothing matches. May raise AmbiguousPrefixLookupError
    if the prefix matches several nodes and cannot be disambiguated.
    """
    if (prefix.startswith('x') and
        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
        # A leading 'x' explicitly marks the remainder as a hex nodeid
        # (never a revnum); strip it before matching.
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous/
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # Only a unique match within the configured revset
                # disambiguates; otherwise re-raise the ambiguity error.
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
470
470
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
    except ValueError:
        return False
    # A pure int starting with zero is never read as a rev, and neither
    # is a value beyond the tip rev.
    if prefix[0:1] == b'0' or i >= len(repo):
        return False
    return True
483
483
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            # prepend "x" when the prefix could be mistaken for a revnum
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix

        # otherwise, lengthen the prefix until it can no longer be a revnum
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # slow pure-Python fallback: grow the prefix until it matches
            # exactly one node within the disambiguation revset
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
552
552
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
564
564
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        # a plain (possibly negative) integer is a revision number
        try:
            r = int(symbol)
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # a full 40-char hex string is a nodeid
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # finally, try it as a hex nodeid prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)
625
625
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
650
650
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve a single revspec to a context, falling back to ``default``.

    An empty revspec (but not the integer 0) selects ``default``. Aborts if
    the revset evaluates to an empty set.
    """
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec], localalias=localalias)
    if not revs:
        raise error.Abort(_('empty revision set'))
    return repo[revs.last()]
659
659
def _pairspec(revspec):
    # True when the revset's top-level operator is a range expression,
    # which forces callers like revpair() to always return a pair.
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
663
663
def revpair(repo, revs):
    """Resolve user-supplied revset specs to a (first, second) context pair.

    With no specs, returns (".", working directory). A single non-range spec
    yields (rev, working directory); otherwise the smartset's extremes are
    used, honoring its ordering.
    """
    if not revs:
        return repo['.'], repo[None]

    resolved = revrange(repo, revs)

    if not resolved:
        first = second = None
    elif resolved.isascending():
        first = resolved.min()
        second = resolved.max()
    elif resolved.isdescending():
        first = resolved.max()
        second = resolved.min()
    else:
        first = resolved.first()
        second = resolved.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
693
693
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are wrapped as rev() revsets; strings pass through as-is
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
721
721
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    # merges: both parents matter
    if len(parents) > 1:
        return parents
    # debug mode: always show the first parent plus null
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    # linear history: the immediately preceding rev is implied, so omit it
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents
737
737
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # pattern has no explicit kind prefix: try shell-style expansion
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                expanded.extend(globbed)
                continue
        expanded.append(kindpat)
    return expanded
756
756
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        # expand bare globs (windows only; a no-op elsewhere)
        pats = expandpats(pats or [])

    def bad(f, msg):
        # default handler: warn on the repo's ui with a relative path
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # a match-everything matcher implies no meaningful patterns
    if m.always():
        pats = []
    return m, pats
781
781
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # matchandpats() returns (matcher, pats); only the matcher is needed here
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
786
786
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
790
790
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
794
794
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    # a plain path needs no matching; canonicalize and return it
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # otherwise the pattern must match exactly one file in the revision
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
808
808
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
844
844
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        # bind the changelog's node->rev translation and the container's
        # membership test once, to keep __contains__ cheap
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))
854
854
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnode, newnodes in replacements.items():
            for newnode in newnodes:
                precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = []
            for n, s in sorted(replacements.items(), key=sortfunc):
                if s or not isobs(n):
                    rel = (unfi[n], tuple(unfi[m] for m in s))
                    rels.append(rel)
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)
983
985
def addremove(repo, matcher, prefix, opts=None):
    # Add new (unknown) files and remove missing (deleted) files, optionally
    # detecting renames between the two sets.
    #
    # Returns 1 if any explicitly named file was rejected or any subrepo
    # reported a failure, 0 otherwise.  With opts['dry_run'] set, nothing is
    # recorded in the dirstate; the status messages are still printed.
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    # opts['similarity'] is a percentage (0-100); normalize to a 0.0-1.0
    # ratio for _findrenames/similar.findrenames
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    # recurse into subrepos first, when --subrepos was given or the matcher
    # explicitly or implicitly selects files inside one
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # files named explicitly but unusable are collected here via badfn and
    # turn the overall result into a failure at the end
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # report what will be added (unknown + forgotten) and removed (deleted);
    # exact matches are only echoed in verbose mode
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
                label = 'addremove.added'
            else:
                status = _('removing %s\n') % m.uipath(abs)
                label = 'addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1045
1047
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # 'rejected' is filled by the badfn closure while the matcher walks
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # echo every path about to be added or removed
        addset = set(unknown + forgotten)
        for abs in sorted(addset | set(deleted)):
            if abs in addset:
                repo.ui.status(_('adding %s\n') % abs)
            else:
                repo.ui.status(_('removing %s\n') % abs)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # a rejected path that was explicitly requested makes the whole call fail
    if any(f in m.files() for f in rejected):
        return 1
    return 0
1074
1076
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added = []
    unknown = []
    deleted = []
    removed = []
    forgotten = []
    auditor = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    ds = repo.dirstate
    walkresults = ds.walk(matcher, subrepos=sorted(ctx.substate),
                          unknown=True, ignored=False, full=False)
    for path, st in walkresults.iteritems():
        state = ds[path]
        if state == '?' and auditor.check(path):
            # untracked, and its path passes the audit
            unknown.append(path)
        elif state != 'r' and not st:
            # tracked but missing from disk
            deleted.append(path)
        elif state == 'r' and st:
            # marked removed yet present on disk
            forgotten.append(path)
        # removed entries are kept for finding renames
        elif state == 'r' and not st:
            removed.append(path)
        elif state == 'a':
            added.append(path)

    return added, unknown, deleted, removed, forgotten
1103
1105
1104 def _findrenames(repo, matcher, added, removed, similarity):
1106 def _findrenames(repo, matcher, added, removed, similarity):
1105 '''Find renames from removed files to added ones.'''
1107 '''Find renames from removed files to added ones.'''
1106 renames = {}
1108 renames = {}
1107 if similarity > 0:
1109 if similarity > 0:
1108 for old, new, score in similar.findrenames(repo, added, removed,
1110 for old, new, score in similar.findrenames(repo, added, removed,
1109 similarity):
1111 similarity):
1110 if (repo.ui.verbose or not matcher.exact(old)
1112 if (repo.ui.verbose or not matcher.exact(old)
1111 or not matcher.exact(new)):
1113 or not matcher.exact(new)):
1112 repo.ui.status(_('recording removal of %s as rename to %s '
1114 repo.ui.status(_('recording removal of %s as rename to %s '
1113 '(%d%% similar)\n') %
1115 '(%d%% similar)\n') %
1114 (matcher.rel(old), matcher.rel(new),
1116 (matcher.rel(old), matcher.rel(new),
1115 score * 100))
1117 score * 100))
1116 renames[new] = old
1118 renames[new] = old
1117 return renames
1119 return renames
1118
1120
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under a single wlock
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
1128
1130
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record so copy chains collapse onto the
    # original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # dirstate states: 'm' and 'n' need no adjustment; anything else is
        # refreshed via normallookup
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source is only added in the working copy: there is no
            # committed revision to record the copy from
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            # '?' (unknown) or 'r' (removed) destinations still get added
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1147
1149
def writerequires(opener, requirements):
    # persist the repo requirements, one per line, in sorted order
    with opener('requires', 'w') as fp:
        for requirement in sorted(requirements):
            fp.write('%s\n' % requirement)
1152
1154
class filecachesubentry(object):
    """Tracks one on-disk path via its stat data to detect changes."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once determined, None while still unknown
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-stat the path, unless it is known to be uncacheable."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # while undetermined, optimistically assume the path is cacheable
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        """Return True when the file changed (or cannot be cached at all)."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """stat() the path; a missing file yields None instead of raising."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1207
1209
class filecacheentry(object):
    """A group of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1224
1226
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is returned.

    On external property set operations, stat() calls are performed and the new
    value is cached.

    On property delete operations, cached data is removed.

    When using the property API, cached data is always returned, if available:
    no stat() is performed to check if the file has changed and if the function
    needs to be called to reflect file changes.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator entry point: remember the wrapped getter and its name in
        # both forms — sname (native str) keys obj.__dict__, name (bytes)
        # keys obj._filecache
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # fast path: a value already stashed in __dict__ is returned without
        # any stat() call (see class docstring)
        if self.sname in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # underlying file changed since last computation: recompute
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            # (stat=False: the files are not stat()ed for an externally
            # supplied value)
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # only the stashed value is dropped; the filecacheentry stays in
        # _filecache (callers may remove it separately, see class docstring)
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)
1315
1317
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]  # strip the "shell:" prefix (6 characters)
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # each record is "<revspec> <value>" or just "<revspec>"
            # (empty value)
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # drain and close the source unconditionally; the exit status is
        # only meaningful once communicate() has completed
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1372
1374
1373 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1375 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1374 if lock is None:
1376 if lock is None:
1375 raise error.LockInheritanceContractViolation(
1377 raise error.LockInheritanceContractViolation(
1376 'lock can only be inherited while held')
1378 'lock can only be inherited while held')
1377 if environ is None:
1379 if environ is None:
1378 environ = {}
1380 environ = {}
1379 with lock.inherit() as locker:
1381 with lock.inherit() as locker:
1380 environ[envvar] = locker
1382 environ[envvar] = locker
1381 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1383 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1382
1384
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1391
1393
class progress(object):
    """Context-manager convenience wrapper around ui.progress() reporting.

    Leaving the context (or calling complete()) clears the progress topic.
    """

    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.topic = topic
        self.unit = unit
        self.total = total
        self.pos = 0

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.complete()

    def update(self, pos, item="", total=None):
        """Move the bar to an absolute position, optionally resizing it."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        """Advance the bar by 'step' relative to the current position."""
        self.update(self.pos + step, item, total)

    def complete(self):
        # a None position tells the ui layer to clear this topic
        self.ui.progress(self.topic, None)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit,
                         self.total)
1422
1424
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    # any of these format options implies a generaldelta repository
    options = ('generaldelta', 'usegeneraldelta', 'sparse-revlog')
    return any(ui.configbool('format', name) for name in options)
1430
1432
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1436
1438
1437 class simplekeyvaluefile(object):
1439 class simplekeyvaluefile(object):
1438 """A simple file with key=value lines
1440 """A simple file with key=value lines
1439
1441
1440 Keys must be alphanumerics and start with a letter, values must not
1442 Keys must be alphanumerics and start with a letter, values must not
1441 contain '\n' characters"""
1443 contain '\n' characters"""
1442 firstlinekey = '__firstline'
1444 firstlinekey = '__firstline'
1443
1445
1444 def __init__(self, vfs, path, keys=None):
1446 def __init__(self, vfs, path, keys=None):
1445 self.vfs = vfs
1447 self.vfs = vfs
1446 self.path = path
1448 self.path = path
1447
1449
1448 def read(self, firstlinenonkeyval=False):
1450 def read(self, firstlinenonkeyval=False):
1449 """Read the contents of a simple key-value file
1451 """Read the contents of a simple key-value file
1450
1452
1451 'firstlinenonkeyval' indicates whether the first line of file should
1453 'firstlinenonkeyval' indicates whether the first line of file should
1452 be treated as a key-value pair or reuturned fully under the
1454 be treated as a key-value pair or reuturned fully under the
1453 __firstline key."""
1455 __firstline key."""
1454 lines = self.vfs.readlines(self.path)
1456 lines = self.vfs.readlines(self.path)
1455 d = {}
1457 d = {}
1456 if firstlinenonkeyval:
1458 if firstlinenonkeyval:
1457 if not lines:
1459 if not lines:
1458 e = _("empty simplekeyvalue file")
1460 e = _("empty simplekeyvalue file")
1459 raise error.CorruptedState(e)
1461 raise error.CorruptedState(e)
1460 # we don't want to include '\n' in the __firstline
1462 # we don't want to include '\n' in the __firstline
1461 d[self.firstlinekey] = lines[0][:-1]
1463 d[self.firstlinekey] = lines[0][:-1]
1462 del lines[0]
1464 del lines[0]
1463
1465
1464 try:
1466 try:
1465 # the 'if line.strip()' part prevents us from failing on empty
1467 # the 'if line.strip()' part prevents us from failing on empty
1466 # lines which only contain '\n' therefore are not skipped
1468 # lines which only contain '\n' therefore are not skipped
1467 # by 'if line'
1469 # by 'if line'
1468 updatedict = dict(line[:-1].split('=', 1) for line in lines
1470 updatedict = dict(line[:-1].split('=', 1) for line in lines
1469 if line.strip())
1471 if line.strip())
1470 if self.firstlinekey in updatedict:
1472 if self.firstlinekey in updatedict:
1471 e = _("%r can't be used as a key")
1473 e = _("%r can't be used as a key")
1472 raise error.CorruptedState(e % self.firstlinekey)
1474 raise error.CorruptedState(e % self.firstlinekey)
1473 d.update(updatedict)
1475 d.update(updatedict)
1474 except ValueError as e:
1476 except ValueError as e:
1475 raise error.CorruptedState(str(e))
1477 raise error.CorruptedState(str(e))
1476 return d
1478 return d
1477
1479
1478 def write(self, data, firstline=None):
1480 def write(self, data, firstline=None):
1479 """Write key=>value mapping to a file
1481 """Write key=>value mapping to a file
1480 data is a dict. Keys must be alphanumerical and start with a letter.
1482 data is a dict. Keys must be alphanumerical and start with a letter.
1481 Values must not contain newline characters.
1483 Values must not contain newline characters.
1482
1484
1483 If 'firstline' is not None, it is written to file before
1485 If 'firstline' is not None, it is written to file before
1484 everything else, as it is, not in a key=value form"""
1486 everything else, as it is, not in a key=value form"""
1485 lines = []
1487 lines = []
1486 if firstline is not None:
1488 if firstline is not None:
1487 lines.append('%s\n' % firstline)
1489 lines.append('%s\n' % firstline)
1488
1490
1489 for k, v in data.items():
1491 for k, v in data.items():
1490 if k == self.firstlinekey:
1492 if k == self.firstlinekey:
1491 e = "key name '%s' is reserved" % self.firstlinekey
1493 e = "key name '%s' is reserved" % self.firstlinekey
1492 raise error.ProgrammingError(e)
1494 raise error.ProgrammingError(e)
1493 if not k[0:1].isalpha():
1495 if not k[0:1].isalpha():
1494 e = "keys must start with a letter in a key-value file"
1496 e = "keys must start with a letter in a key-value file"
1495 raise error.ProgrammingError(e)
1497 raise error.ProgrammingError(e)
1496 if not k.isalnum():
1498 if not k.isalnum():
1497 e = "invalid key name in a simple key-value file"
1499 e = "invalid key name in a simple key-value file"
1498 raise error.ProgrammingError(e)
1500 raise error.ProgrammingError(e)
1499 if '\n' in v:
1501 if '\n' in v:
1500 e = "invalid value in a simple key-value file"
1502 e = "invalid value in a simple key-value file"
1501 raise error.ProgrammingError(e)
1503 raise error.ProgrammingError(e)
1502 lines.append("%s=%s\n" % (k, v))
1504 lines.append("%s=%s\n" % (k, v))
1503 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1505 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1504 fp.write(''.join(lines))
1506 fp.write(''.join(lines))
1505
1507
# Transaction names (prefixes) for which a post-transaction summary of
# obsoleted changesets should be issued by registersummarycallback().
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# Transaction names (prefixes) for which new-changeset and phase-change
# summaries should be issued (changesets arriving from elsewhere).
_reportnewcssource = [
    'pull',
    'unbundle',
]
1518
1520
def prefetchfiles(repo, revs, match):
    """Invoke the registered file prefetch functions.

    This lets extensions ensure the corresponding files are available
    locally before the command uses them.  ``match`` may be None, in
    which case all files are considered.
    """
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist,
        # so don't duplicate the message here.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1531
1533
# a list of (repo, revs, match) prefetch functions; extensions register
# callbacks here and prefetchfiles() invokes them
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
# (this module's registersummarycallback handles it instead)
_reportstroubledchangesets = True
1537
1539
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Depending on ``txnname``, registers post-close callbacks on ``otr``
    (the transaction) that report obsoleted changesets, new instabilities,
    newly pulled/unbundled changesets and phase changes.
    """
    def txmatch(sources):
        # True if this transaction's name starts with any of the given
        # source prefixes (e.g. 'pull', 'unbundle')
        return any(txnname.startswith(source) for source in sources)

    # category names registered so far; also used to generate ordered,
    # unique post-close category names
    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                # re-apply the original filter level before calling back
                repo = repo.filtered(filtername)
            func(repo, tr)
        # zero-padded index keeps callbacks running in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            # number of changesets obsoleted by this transaction
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        # (user-facing label, revset name) pairs for each instability kind
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count visible (non-filtered) unstable changesets per kind
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot taken before the transaction runs, compared after close
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            if origrepolen >= len(repo):
                # nothing was added by this transaction
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%d: and not obsolete()', origrepolen)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            # break the new range down by phase for the status message
            draft = len(repo.revs('%ld and draft()', revs))
            secret = len(repo.revs('%ld and secret()', revs))
            if not (draft or secret):
                msg = _('new changesets %s\n') % revrange
            elif draft and secret:
                msg = _('new changesets %s (%d drafts, %d secrets)\n')
                msg %= (revrange, draft, secret)
            elif draft:
                msg = _('new changesets %s (%d drafts)\n')
                msg %= (revrange, draft)
            elif secret:
                msg = _('new changesets %s (%d secrets)\n')
                msg %= (revrange, secret)
            else:
                raise error.ProgrammingError('entered unreachable condition')
            repo.ui.status(msg)

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            # pre-existing revisions that became public in this transaction
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
1654
1656
def getinstabilitymessage(delta, instability):
    """Return the warning message for newly-introduced instabilities.

    Exists as a separate function so that extensions can wrap it to show
    more information, like how to fix instabilities.  Returns None when
    ``delta`` is not positive (no new instabilities of that kind).
    """
    if delta <= 0:
        return None
    return _('%i new %s changesets\n') % (delta, instability)
1662
1664
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a space-separated short-hash summary of ``nodes``.

    The list is truncated to ``maxnumnodes`` entries (with an "and N
    others" suffix) unless the ui is verbose or the list is short enough.
    """
    if repo.ui.verbose or len(nodes) <= maxnumnodes:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1668
1670
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads

    Aborts the transaction if any visible branch has more than one head.
    The check is skipped for strip/repair transactions.
    """
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1683
1685
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally
    loaded.
    """
    # default implementation is the identity; extensions wrap this function
    return sink
1689
1691
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision
    number is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    directaccess = repo.ui.configbool('experimental', 'directaccess')
    if not repo.filtername or not directaccess:
        return repo
    if repo.filtername not in ('visible', 'visible-hidden'):
        # only these filter levels support dynamic pinning
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)
    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1732
1734
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of
    hidden changesets present in symbols"""
    hidden = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for sym in symbols:
        # first, try interpreting the symbol as a revision number
        try:
            rev = int(sym)
            if rev <= tiprev:
                if not allowrevnums:
                    # numeric access disabled; don't fall through to the
                    # hash lookup for an in-range number
                    continue
                else:
                    if rev not in cl:
                        # filtered out of the current view => hidden
                        hidden.add(rev)
                    continue
        except ValueError:
            pass

        # otherwise, try it as a (possibly abbreviated) hex node id
        try:
            node = resolvehexnodeidprefix(unfi, sym)
        except (error.LookupError, error.WdirUnsupported):
            node = None

        if node is not None:
            rev = unficl.rev(node)
            if rev not in cl:
                hidden.add(rev)

    return hidden
1766
1768
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    # ancestors of the bookmark, excluding anything also reachable from
    # other (non-bookmarked) heads or from other bookmarks
    expr = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(expr, mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now