scmutil: wrap locker information in bytestr before repr()ing it...
Augie Fackler
r40203:c554dc0c default
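
This change touches the error.LockHeld handler in callcatch(): on Python 3,
applying %r to a raw bytes locker string renders it with a b'' prefix, so the
abort message (and test output) differs from Python 2. Wrapping the value in
pycompat.bytestr first gives a repr that reads the same on both. Below is a
minimal Python 3 sketch of the mechanism; the bytestr class here is an
illustrative stand-in for mercurial.pycompat.bytestr (mimicking only the repr
behavior relevant to this change), and the locker value is invented for the
example.

    # Stand-in for mercurial.pycompat.bytestr; illustration only.
    class bytestr(bytes):
        def __repr__(self):
            # Drop the leading 'b' so the repr matches Python 2's str repr.
            return bytes.__repr__(self)[1:]

    locker = b'user@example.com:12345'  # made-up locker value

    # Raw bytes: %r (an alias of %a in bytes formatting) leaks the b'' prefix.
    print(b'timed out waiting for lock held by %r' % locker)
    # -> b"timed out waiting for lock held by b'user@example.com:12345'"

    # Wrapped first, the overridden __repr__ is picked up and the output
    # matches what Python 2 would have produced.
    print(b'timed out waiting for lock held by %r' % bytestr(locker))
    # -> b"timed out waiting for lock held by 'user@example.com:12345'"
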
@@ -1,1802 +1,1803 @@
 # scmutil.py - Mercurial core utility functions
 #
 # Copyright Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import glob
 import hashlib
 import os
 import re
 import socket
 import subprocess
 import weakref
 
 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
     short,
     wdirid,
     wdirrev,
 )
 
 from . import (
     encoding,
     error,
     match as matchmod,
     obsolete,
     obsutil,
     pathutil,
     phases,
     policy,
     pycompat,
     revsetlang,
     similar,
     smartset,
     url,
     util,
     vfs,
 )
 
 from .utils import (
     procutil,
     stringutil,
 )
 
 if pycompat.iswindows:
     from . import scmwindows as scmplatform
 else:
     from . import scmposix as scmplatform
 
 parsers = policy.importmod(r'parsers')
 
 termsize = scmplatform.termsize
 
 class status(tuple):
     '''Named tuple with a list of files per status. The 'deleted', 'unknown'
     and 'ignored' properties are only relevant to the working copy.
     '''
 
     __slots__ = ()
 
     def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                 clean):
         return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                    ignored, clean))
 
     @property
     def modified(self):
         '''files that have been modified'''
         return self[0]
 
     @property
     def added(self):
         '''files that have been added'''
         return self[1]
 
     @property
     def removed(self):
         '''files that have been removed'''
         return self[2]
 
     @property
     def deleted(self):
         '''files that are in the dirstate, but have been deleted from the
         working copy (aka "missing")
         '''
         return self[3]
 
     @property
     def unknown(self):
         '''files not in the dirstate that are not ignored'''
         return self[4]
 
     @property
     def ignored(self):
         '''files not in the dirstate that are ignored (by _dirignore())'''
         return self[5]
 
     @property
     def clean(self):
         '''files that have not been modified'''
         return self[6]
 
     def __repr__(self, *args, **kwargs):
         return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                  r'unknown=%s, ignored=%s, clean=%s>') %
                 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
 
 def itersubrepos(ctx1, ctx2):
     """find subrepos in ctx1 or ctx2"""
     # Create a (subpath, ctx) mapping where we prefer subpaths from
     # ctx1. The subpaths from ctx2 are important when the .hgsub file
     # has been modified (in ctx2) but not yet committed (in ctx1).
     subpaths = dict.fromkeys(ctx2.substate, ctx2)
     subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
 
     missing = set()
 
     for subpath in ctx2.substate:
         if subpath not in ctx1.substate:
             del subpaths[subpath]
             missing.add(subpath)
 
     for subpath, ctx in sorted(subpaths.iteritems()):
         yield subpath, ctx.sub(subpath)
 
     # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
     # status and diff will have an accurate result when it does
     # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
     # against itself.
     for subpath in missing:
         yield subpath, ctx2.nullsub(subpath, ctx1)
 
 def nochangesfound(ui, repo, excluded=None):
     '''Report no changes for push/pull, excluded is None or a list of
     nodes excluded from the push/pull.
     '''
     secretlist = []
     if excluded:
         for n in excluded:
             ctx = repo[n]
             if ctx.phase() >= phases.secret and not ctx.extinct():
                 secretlist.append(n)
 
     if secretlist:
         ui.status(_("no changes found (ignored %d secret changesets)\n")
                   % len(secretlist))
     else:
         ui.status(_("no changes found\n"))
 
 def callcatch(ui, func):
     """call func() with global exception handling
 
     return func() if no exception happens. otherwise do some error handling
     and return an exit code accordingly. does not handle all exceptions.
     """
     try:
         try:
             return func()
         except: # re-raises
             ui.traceback()
             raise
     # Global exception handling, alphabetically
     # Mercurial-specific first, followed by built-in and library exceptions
     except error.LockHeld as inst:
         if inst.errno == errno.ETIMEDOUT:
-            reason = _('timed out waiting for lock held by %r') % inst.locker
+            reason = _('timed out waiting for lock held by %r') % (
+                pycompat.bytestr(inst.locker))
         else:
             reason = _('lock held by %r') % inst.locker
         ui.error(_("abort: %s: %s\n") % (
             inst.desc or stringutil.forcebytestr(inst.filename), reason))
         if not inst.locker:
             ui.error(_("(lock might be very busy)\n"))
     except error.LockUnavailable as inst:
         ui.error(_("abort: could not lock %s: %s\n") %
                  (inst.desc or stringutil.forcebytestr(inst.filename),
                   encoding.strtolocal(inst.strerror)))
     except error.OutOfBandError as inst:
         if inst.args:
             msg = _("abort: remote error:\n")
         else:
             msg = _("abort: remote error\n")
         ui.error(msg)
         if inst.args:
             ui.error(''.join(inst.args))
         if inst.hint:
             ui.error('(%s)\n' % inst.hint)
     except error.RepoError as inst:
         ui.error(_("abort: %s!\n") % inst)
         if inst.hint:
             ui.error(_("(%s)\n") % inst.hint)
     except error.ResponseError as inst:
         ui.error(_("abort: %s") % inst.args[0])
         msg = inst.args[1]
         if isinstance(msg, type(u'')):
             msg = pycompat.sysbytes(msg)
         if not isinstance(msg, bytes):
             ui.error(" %r\n" % (msg,))
         elif not msg:
             ui.error(_(" empty string\n"))
         else:
             ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
     except error.CensoredNodeError as inst:
         ui.error(_("abort: file censored %s!\n") % inst)
     except error.StorageError as inst:
         ui.error(_("abort: %s!\n") % inst)
     except error.InterventionRequired as inst:
         ui.error("%s\n" % inst)
         if inst.hint:
             ui.error(_("(%s)\n") % inst.hint)
         return 1
     except error.WdirUnsupported:
         ui.error(_("abort: working directory revision cannot be specified\n"))
     except error.Abort as inst:
         ui.error(_("abort: %s\n") % inst)
         if inst.hint:
             ui.error(_("(%s)\n") % inst.hint)
     except ImportError as inst:
         ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
         m = stringutil.forcebytestr(inst).split()[-1]
         if m in "mpatch bdiff".split():
             ui.error(_("(did you forget to compile extensions?)\n"))
         elif m in "zlib".split():
             ui.error(_("(is your Python install correct?)\n"))
     except IOError as inst:
         if util.safehasattr(inst, "code"):
             ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
         elif util.safehasattr(inst, "reason"):
             try: # usually it is in the form (errno, strerror)
                 reason = inst.reason.args[1]
             except (AttributeError, IndexError):
                 # it might be anything, for example a string
                 reason = inst.reason
             if isinstance(reason, pycompat.unicode):
                 # SSLError of Python 2.7.9 contains a unicode
                 reason = encoding.unitolocal(reason)
             ui.error(_("abort: error: %s\n") % reason)
         elif (util.safehasattr(inst, "args")
               and inst.args and inst.args[0] == errno.EPIPE):
             pass
         elif getattr(inst, "strerror", None):
             if getattr(inst, "filename", None):
                 ui.error(_("abort: %s: %s\n") % (
                     encoding.strtolocal(inst.strerror),
                     stringutil.forcebytestr(inst.filename)))
             else:
                 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
         else:
             raise
     except OSError as inst:
         if getattr(inst, "filename", None) is not None:
             ui.error(_("abort: %s: '%s'\n") % (
                 encoding.strtolocal(inst.strerror),
                 stringutil.forcebytestr(inst.filename)))
         else:
             ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
     except MemoryError:
         ui.error(_("abort: out of memory\n"))
     except SystemExit as inst:
         # Commands shouldn't sys.exit directly, but give a return code.
         # Just in case catch this and and pass exit code to caller.
         return inst.code
     except socket.error as inst:
         ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
 
     return -1
 
 def checknewlabel(repo, lbl, kind):
     # Do not use the "kind" parameter in ui output.
     # It makes strings difficult to translate.
     if lbl in ['tip', '.', 'null']:
         raise error.Abort(_("the name '%s' is reserved") % lbl)
     for c in (':', '\0', '\n', '\r'):
         if c in lbl:
             raise error.Abort(
                 _("%r cannot be used in a name") % pycompat.bytestr(c))
     try:
         int(lbl)
         raise error.Abort(_("cannot use an integer as a name"))
     except ValueError:
         pass
     if lbl.strip() != lbl:
         raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
 
 def checkfilename(f):
     '''Check that the filename f is an acceptable filename for a tracked file'''
     if '\r' in f or '\n' in f:
         raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                           % pycompat.bytestr(f))
 
 def checkportable(ui, f):
     '''Check if filename f is portable and warn or abort depending on config'''
     checkfilename(f)
     abort, warn = checkportabilityalert(ui)
     if abort or warn:
         msg = util.checkwinfilename(f)
         if msg:
             msg = "%s: %s" % (msg, procutil.shellquote(f))
             if abort:
                 raise error.Abort(msg)
             ui.warn(_("warning: %s\n") % msg)
 
 def checkportabilityalert(ui):
     '''check if the user's config requests nothing, a warning, or abort for
     non-portable filenames'''
     val = ui.config('ui', 'portablefilenames')
     lval = val.lower()
     bval = stringutil.parsebool(val)
     abort = pycompat.iswindows or lval == 'abort'
     warn = bval or lval == 'warn'
     if bval is None and not (warn or abort or lval == 'ignore'):
         raise error.ConfigError(
             _("ui.portablefilenames value is invalid ('%s')") % val)
     return abort, warn
 
 class casecollisionauditor(object):
     def __init__(self, ui, abort, dirstate):
         self._ui = ui
         self._abort = abort
         allfiles = '\0'.join(dirstate._map)
         self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
         self._dirstate = dirstate
         # The purpose of _newfiles is so that we don't complain about
         # case collisions if someone were to call this object with the
         # same filename twice.
         self._newfiles = set()
 
     def __call__(self, f):
         if f in self._newfiles:
             return
         fl = encoding.lower(f)
         if fl in self._loweredfiles and f not in self._dirstate:
             msg = _('possible case-folding collision for %s') % f
             if self._abort:
                 raise error.Abort(msg)
             self._ui.warn(_("warning: %s\n") % msg)
         self._loweredfiles.add(fl)
         self._newfiles.add(f)
 
 def filteredhash(repo, maxrev):
     """build hash of filtered revisions in the current repoview.
 
     Multiple caches perform up-to-date validation by checking that the
     tiprev and tipnode stored in the cache file match the current repository.
     However, this is not sufficient for validating repoviews because the set
     of revisions in the view may change without the repository tiprev and
     tipnode changing.
 
     This function hashes all the revs filtered from the view and returns
     that SHA-1 digest.
     """
     cl = repo.changelog
     if not cl.filteredrevs:
         return None
     key = None
     revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
     if revs:
         s = hashlib.sha1()
         for rev in revs:
             s.update('%d;' % rev)
         key = s.digest()
     return key
 
 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
     '''yield every hg repository under path, always recursively.
     The recurse flag will only control recursion into repo working dirs'''
     def errhandler(err):
         if err.filename == path:
             raise err
     samestat = getattr(os.path, 'samestat', None)
     if followsym and samestat is not None:
         def adddir(dirlst, dirname):
             dirstat = os.stat(dirname)
             match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
             if not match:
                 dirlst.append(dirstat)
             return not match
     else:
         followsym = False
 
     if (seen_dirs is None) and followsym:
         seen_dirs = []
         adddir(seen_dirs, path)
     for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
         dirs.sort()
         if '.hg' in dirs:
             yield root # found a repository
             qroot = os.path.join(root, '.hg', 'patches')
             if os.path.isdir(os.path.join(qroot, '.hg')):
                 yield qroot # we have a patch queue repo here
             if recurse:
                 # avoid recursing inside the .hg directory
                 dirs.remove('.hg')
             else:
                 dirs[:] = [] # don't descend further
         elif followsym:
             newdirs = []
             for d in dirs:
                 fname = os.path.join(root, d)
                 if adddir(seen_dirs, fname):
                     if os.path.islink(fname):
                         for hgname in walkrepos(fname, True, seen_dirs):
                             yield hgname
                     else:
                         newdirs.append(d)
             dirs[:] = newdirs
 
 def binnode(ctx):
     """Return binary node id for a given basectx"""
     node = ctx.node()
     if node is None:
         return wdirid
     return node
 
 def intrev(ctx):
     """Return integer for a given basectx that can be used in comparison or
     arithmetic operation"""
     rev = ctx.rev()
     if rev is None:
         return wdirrev
     return rev
 
 def formatchangeid(ctx):
     """Format changectx as '{rev}:{node|formatnode}', which is the default
     template provided by logcmdutil.changesettemplater"""
     repo = ctx.repo()
     return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
 
 def formatrevnode(ui, rev, node):
     """Format given revision and node depending on the current verbosity"""
     if ui.debugflag:
         hexfunc = hex
     else:
         hexfunc = short
     return '%d:%s' % (rev, hexfunc(node))
 
 def resolvehexnodeidprefix(repo, prefix):
     if (prefix.startswith('x') and
             repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
         prefix = prefix[1:]
     try:
         # Uses unfiltered repo because it's faster when prefix is ambiguous/
         # This matches the shortesthexnodeidprefix() function below.
         node = repo.unfiltered().changelog._partialmatch(prefix)
     except error.AmbiguousPrefixLookupError:
         revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
         if revset:
             # Clear config to avoid infinite recursion
             configoverrides = {('experimental',
                                 'revisions.disambiguatewithin'): None}
             with repo.ui.configoverride(configoverrides):
                 revs = repo.anyrevs([revset], user=True)
                 matches = []
                 for rev in revs:
                     node = repo.changelog.node(rev)
                     if hex(node).startswith(prefix):
                         matches.append(node)
                 if len(matches) == 1:
                     return matches[0]
         raise
     if node is None:
         return
     repo.changelog.rev(node) # make sure node isn't filtered
     return node
 
 def mayberevnum(repo, prefix):
     """Checks if the given prefix may be mistaken for a revision number"""
     try:
         i = int(prefix)
         # if we are a pure int, then starting with zero will not be
         # confused as a rev; or, obviously, if the int is larger
         # than the value of the tip rev
         if prefix[0:1] == b'0' or i >= len(repo):
             return False
         return True
     except ValueError:
         return False
 
 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
     """Find the shortest unambiguous prefix that matches hexnode.
 
     If "cache" is not None, it must be a dictionary that can be used for
     caching between calls to this method.
     """
     # _partialmatch() of filtered changelog could take O(len(repo)) time,
     # which would be unacceptably slow. so we look for hash collision in
     # unfiltered space, which means some hashes may be slightly longer.
 
     def disambiguate(prefix):
         """Disambiguate against revnums."""
         if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
             if mayberevnum(repo, prefix):
                 return 'x' + prefix
             else:
                 return prefix
 
         hexnode = hex(node)
         for length in range(len(prefix), len(hexnode) + 1):
             prefix = hexnode[:length]
             if not mayberevnum(repo, prefix):
                 return prefix
 
     cl = repo.unfiltered().changelog
     revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
     if revset:
         revs = None
         if cache is not None:
             revs = cache.get('disambiguationrevset')
         if revs is None:
             revs = repo.anyrevs([revset], user=True)
             if cache is not None:
                 cache['disambiguationrevset'] = revs
         if cl.rev(node) in revs:
             hexnode = hex(node)
             nodetree = None
             if cache is not None:
                 nodetree = cache.get('disambiguationnodetree')
             if not nodetree:
                 try:
                     nodetree = parsers.nodetree(cl.index, len(revs))
                 except AttributeError:
                     # no native nodetree
                     pass
                 else:
                     for r in revs:
                         nodetree.insert(r)
                     if cache is not None:
                         cache['disambiguationnodetree'] = nodetree
             if nodetree is not None:
                 length = max(nodetree.shortest(node), minlength)
                 prefix = hexnode[:length]
                 return disambiguate(prefix)
             for length in range(minlength, len(hexnode) + 1):
                 matches = []
                 prefix = hexnode[:length]
                 for rev in revs:
                     otherhexnode = repo[rev].hex()
                     if prefix == otherhexnode[:length]:
                         matches.append(otherhexnode)
                 if len(matches) == 1:
                     return disambiguate(prefix)
 
     try:
         return disambiguate(cl.shortest(node, minlength))
     except error.LookupError:
         raise error.RepoLookupError()
 
 def isrevsymbol(repo, symbol):
     """Checks if a symbol exists in the repo.
 
     See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
     symbol is an ambiguous nodeid prefix.
     """
     try:
         revsymbol(repo, symbol)
         return True
     except error.RepoLookupError:
         return False
 
 def revsymbol(repo, symbol):
     """Returns a context given a single revision symbol (as string).
 
     This is similar to revsingle(), but accepts only a single revision symbol,
     i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
     not "max(public())".
     """
     if not isinstance(symbol, bytes):
         msg = ("symbol (%s of type %s) was not a string, did you mean "
                "repo[symbol]?" % (symbol, type(symbol)))
         raise error.ProgrammingError(msg)
     try:
         if symbol in ('.', 'tip', 'null'):
             return repo[symbol]
 
         try:
             r = int(symbol)
             if '%d' % r != symbol:
                 raise ValueError
             l = len(repo.changelog)
             if r < 0:
                 r += l
             if r < 0 or r >= l and r != wdirrev:
                 raise ValueError
             return repo[r]
         except error.FilteredIndexError:
             raise
         except (ValueError, OverflowError, IndexError):
             pass
 
         if len(symbol) == 40:
             try:
                 node = bin(symbol)
                 rev = repo.changelog.rev(node)
                 return repo[rev]
             except error.FilteredLookupError:
                 raise
             except (TypeError, LookupError):
                 pass
 
         # look up bookmarks through the name interface
         try:
             node = repo.names.singlenode(repo, symbol)
             rev = repo.changelog.rev(node)
             return repo[rev]
         except KeyError:
             pass
 
         node = resolvehexnodeidprefix(repo, symbol)
         if node is not None:
             rev = repo.changelog.rev(node)
             return repo[rev]
 
         raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
 
     except error.WdirUnsupported:
         return repo[None]
     except (error.FilteredIndexError, error.FilteredLookupError,
             error.FilteredRepoLookupError):
         raise _filterederror(repo, symbol)
 
 def _filterederror(repo, changeid):
     """build an exception to be raised about a filtered changeid
 
     This is extracted in a function to help extensions (eg: evolve) to
     experiment with various message variants."""
     if repo.filtername.startswith('visible'):
 
         # Check if the changeset is obsolete
         unfilteredrepo = repo.unfiltered()
         ctx = revsymbol(unfilteredrepo, changeid)
 
         # If the changeset is obsolete, enrich the message with the reason
         # that made this changeset not visible
         if ctx.obsolete():
             msg = obsutil._getfilteredreason(repo, changeid, ctx)
         else:
             msg = _("hidden revision '%s'") % changeid
 
         hint = _('use --hidden to access hidden revisions')
 
         return error.FilteredRepoLookupError(msg, hint=hint)
     msg = _("filtered revision '%s' (not in '%s' subset)")
     msg %= (changeid, repo.filtername)
     return error.FilteredRepoLookupError(msg)
 
 def revsingle(repo, revspec, default='.', localalias=None):
     if not revspec and revspec != 0:
         return repo[default]
 
     l = revrange(repo, [revspec], localalias=localalias)
     if not l:
         raise error.Abort(_('empty revision set'))
     return repo[l.last()]
 
 def _pairspec(revspec):
     tree = revsetlang.parse(revspec)
     return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
 
 def revpair(repo, revs):
     if not revs:
         return repo['.'], repo[None]
 
     l = revrange(repo, revs)
 
     if not l:
         first = second = None
     elif l.isascending():
         first = l.min()
         second = l.max()
     elif l.isdescending():
         first = l.max()
         second = l.min()
     else:
         first = l.first()
         second = l.last()
 
     if first is None:
         raise error.Abort(_('empty revision range'))
     if (first == second and len(revs) >= 2
         and not all(revrange(repo, [r]) for r in revs)):
         raise error.Abort(_('empty revision on one side of range'))
 
     # if top-level is range expression, the result must always be a pair
     if first == second and len(revs) == 1 and not _pairspec(revs[0]):
         return repo[first], repo[None]
 
     return repo[first], repo[second]
 
 def revrange(repo, specs, localalias=None):
     """Execute 1 to many revsets and return the union.
 
     This is the preferred mechanism for executing revsets using user-specified
     config options, such as revset aliases.
 
     The revsets specified by ``specs`` will be executed via a chained ``OR``
     expression. If ``specs`` is empty, an empty result is returned.
 
     ``specs`` can contain integers, in which case they are assumed to be
     revision numbers.
 
     It is assumed the revsets are already formatted. If you have arguments
     that need to be expanded in the revset, call ``revsetlang.formatspec()``
     and pass the result as an element of ``specs``.
 
     Specifying a single revset is allowed.
 
     Returns a ``revset.abstractsmartset`` which is a list-like interface over
     integer revisions.
     """
     allspecs = []
     for spec in specs:
         if isinstance(spec, int):
             spec = revsetlang.formatspec('rev(%d)', spec)
         allspecs.append(spec)
     return repo.anyrevs(allspecs, user=True, localalias=localalias)
 
 def meaningfulparents(repo, ctx):
     """Return list of meaningful (or all if debug) parentrevs for rev.
 
     For merges (two non-nullrev revisions) both parents are meaningful.
     Otherwise the first parent revision is considered meaningful if it
     is not the preceding revision.
     """
     parents = ctx.parents()
     if len(parents) > 1:
         return parents
     if repo.ui.debugflag:
         return [parents[0], repo[nullrev]]
     if parents[0].rev() >= intrev(ctx) - 1:
         return []
     return parents
 
 def expandpats(pats):
     '''Expand bare globs when running on windows.
     On posix we assume it already has already been done by sh.'''
     if not util.expandglobs:
         return list(pats)
     ret = []
     for kindpat in pats:
         kind, pat = matchmod._patsplit(kindpat, None)
         if kind is None:
             try:
                 globbed = glob.glob(pat)
             except re.error:
                 globbed = [pat]
             if globbed:
                 ret.extend(globbed)
                 continue
         ret.append(kindpat)
     return ret
 
 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                  badfn=None):
     '''Return a matcher and the patterns that were used.
     The matcher will warn about bad matches, unless an alternate badfn callback
     is provided.'''
     if pats == ("",):
         pats = []
     if opts is None:
         opts = {}
     if not globbed and default == 'relpath':
         pats = expandpats(pats or [])
 
     def bad(f, msg):
         ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
 
     if badfn is None:
         badfn = bad
 
     m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                   default, listsubrepos=opts.get('subrepos'), badfn=badfn)
 
     if m.always():
         pats = []
     return m, pats
 
 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
           badfn=None):
     '''Return a matcher that will warn about bad matches.'''
     return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
 
 def matchall(repo):
     '''Return a matcher that will efficiently match everything.'''
     return matchmod.always(repo.root, repo.getcwd())
 
 def matchfiles(repo, files, badfn=None):
     '''Return a matcher that will efficiently match exactly these files.'''
     return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
 
 def parsefollowlinespattern(repo, rev, pat, msg):
     """Return a file name from `pat` pattern suitable for usage in followlines
     logic.
     """
     if not matchmod.patkind(pat):
         return pathutil.canonpath(repo.root, repo.getcwd(), pat)
     else:
         ctx = repo[rev]
         m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
         files = [f for f in ctx if m(f)]
         if len(files) != 1:
             raise error.ParseError(msg)
         return files[0]
 
 def origpath(ui, repo, filepath):
     '''customize where .orig files are created
 
     Fetch user defined path from config file: [ui] origbackuppath = <path>
     Fall back to default (filepath with .orig suffix) if not specified
     '''
     origbackuppath = ui.config('ui', 'origbackuppath')
     if not origbackuppath:
         return filepath + ".orig"
 
     # Convert filepath from an absolute path into a path inside the repo.
     filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                      start=repo.root))
 
     origvfs = vfs.vfs(repo.wjoin(origbackuppath))
     origbackupdir = origvfs.dirname(filepathfromroot)
     if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
         ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
 
         # Remove any files that conflict with the backup file's path
         for f in reversed(list(util.finddirs(filepathfromroot))):
             if origvfs.isfileorlink(f):
                 ui.note(_('removing conflicting file: %s\n')
                         % origvfs.join(f))
                 origvfs.unlink(f)
                 break
 
         origvfs.makedirs(origbackupdir)
 
     if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
         ui.note(_('removing conflicting directory: %s\n')
                 % origvfs.join(filepathfromroot))
         origvfs.rmtree(filepathfromroot, forcibly=True)
 
     return origvfs.join(filepathfromroot)
 
 class _containsnode(object):
     """proxy __contains__(node) to container.__contains__ which accepts revs"""
 
     def __init__(self, repo, revcontainer):
         self._torev = repo.changelog.rev
         self._revcontains = revcontainer.__contains__
 
     def __contains__(self, node):
         return self._revcontains(self._torev(node))
857 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
858 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
858 fixphase=False, targetphase=None, backup=True):
859 fixphase=False, targetphase=None, backup=True):
859 """do common cleanups when old nodes are replaced by new nodes
860 """do common cleanups when old nodes are replaced by new nodes
860
861
861 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
862 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
862 (we might also want to move working directory parent in the future)
863 (we might also want to move working directory parent in the future)
863
864
864 By default, bookmark moves are calculated automatically from 'replacements',
865 By default, bookmark moves are calculated automatically from 'replacements',
865 but 'moves' can be used to override that. Also, 'moves' may include
866 but 'moves' can be used to override that. Also, 'moves' may include
866 additional bookmark moves that should not have associated obsmarkers.
867 additional bookmark moves that should not have associated obsmarkers.
867
868
868 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
869 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
869 have replacements. operation is a string, like "rebase".
870 have replacements. operation is a string, like "rebase".
870
871
871 metadata is dictionary containing metadata to be stored in obsmarker if
872 metadata is dictionary containing metadata to be stored in obsmarker if
872 obsolescence is enabled.
873 obsolescence is enabled.
873 """
874 """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non-tuple "source" keys to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with the biggest rev number
                newnode = next(unfi.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                      allreplaced))
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, which might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)

def addremove(repo, matcher, prefix, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
                label = 'addremove.added'
            else:
                status = _('removing %s\n') % m.uipath(abs)
                label = 'addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
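    # The dirstate states consulted below: '?' unknown, 'r' removed,
    # 'a' added; 'st' is the walk's stat result, which is false-y when the
    # file is missing from disk.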
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def writerequires(opener, requirements):
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)

class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is returned.

    On external property set operations, stat() calls are performed and the new
    value is cached.

    On property delete operations, cached data is removed.

    When using the property API, cached data is always returned, if available:
    no stat() is performed to check if the file has changed and if the function
    needs to be called to reflect file changes.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.sname in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)

def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data

def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

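# Illustrative call (hypothetical command): while holding the wlock,
#   with repo.wlock():
#       wlocksub(repo, 'hg debuglocks')
# exports the lock token through HG_WLOCK_LOCKER so that the child hg
# process can inherit the wlock instead of blocking on it.
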
class progress(object):
    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item="", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.ui.progress(self.topic, None)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit,
                         self.total)

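# Illustrative use of the progress helper (example topic and items):
#   with progress(ui, 'files', unit='files', total=len(files)) as p:
#       for f in files:
#           p.increment(item=f)
# Leaving the context manager calls complete(), which clears the bar.
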
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta')
            or ui.configbool('format', 'usegeneraldelta')
            or ui.configbool('format', 'sparse-revlog'))

def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')

class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'
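    # Illustrative round-trip (example data):
    #   write({'version': '1'}, firstline='statefile-v1') produces
    #     statefile-v1
    #     version=1
    #   and read(firstlinenonkeyval=True) then returns
    #     {'__firstline': 'statefile-v1', 'version': '1'}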

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of the file
        should be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' and therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write a key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))

_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

_reportnewcssource = [
    'pull',
    'unbundle',
]

def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

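# Extensions register prefetch functions roughly like this (sketch only;
# '_fetchfiles' and 'myextension' are made-up names):
#   def _fetchfiles(repo, revs, match):
#       ... ensure the matched files for revs are available locally ...
#   fileprefetchhooks.add('myextension', _fetchfiles)
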
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of the new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search for new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally but the term has not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            the pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
1727 if not repo.filtername or not repo.ui.configbool('experimental',
1728 if not repo.filtername or not repo.ui.configbool('experimental',
1728 'directaccess'):
1729 'directaccess'):
1729 return repo
1730 return repo
1730
1731
1731 if repo.filtername not in ('visible', 'visible-hidden'):
1732 if repo.filtername not in ('visible', 'visible-hidden'):
1732 return repo
1733 return repo
1733
1734
1734 symbols = set()
1735 symbols = set()
1735 for spec in specs:
1736 for spec in specs:
1736 try:
1737 try:
1737 tree = revsetlang.parse(spec)
1738 tree = revsetlang.parse(spec)
1738 except error.ParseError: # will be reported by scmutil.revrange()
1739 except error.ParseError: # will be reported by scmutil.revrange()
1739 continue
1740 continue
1740
1741
1741 symbols.update(revsetlang.gethashlikesymbols(tree))
1742 symbols.update(revsetlang.gethashlikesymbols(tree))
1742
1743
1743 if not symbols:
1744 if not symbols:
1744 return repo
1745 return repo
1745
1746
1746 revs = _getrevsfromsymbols(repo, symbols)
1747 revs = _getrevsfromsymbols(repo, symbols)
1747
1748
1748 if not revs:
1749 if not revs:
1749 return repo
1750 return repo
1750
1751
1751 if hiddentype == 'warn':
1752 if hiddentype == 'warn':
1752 unfi = repo.unfiltered()
1753 unfi = repo.unfiltered()
1753 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1754 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1754 repo.ui.warn(_("warning: accessing hidden changesets for write "
1755 repo.ui.warn(_("warning: accessing hidden changesets for write "
1755 "operation: %s\n") % revstr)
1756 "operation: %s\n") % revstr)
1756
1757
1757 # we have to use new filtername to separate branch/tags cache until we can
1758 # we have to use new filtername to separate branch/tags cache until we can
1758 # disbale these cache when revisions are dynamically pinned.
1759 # disbale these cache when revisions are dynamically pinned.
1759 return repo.filtered('visible-hidden', revs)
1760 return repo.filtered('visible-hidden', revs)
1760
1761
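
# Editor's illustration (not part of scmutil.py): a sketch of the intended
# call pattern, assuming 'experimental.directaccess' is enabled and b'1a2b3c'
# abbreviates a hidden changeset:
#
#     repo = unhidehashlikerevs(repo, [b'1a2b3c'], 'warn')
#     # ui output: warning: accessing hidden changesets for write
#     # operation: <short hash>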

def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
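
# Editor's illustration (not part of scmutil.py): how the two paths above
# classify symbols, with made-up values. With
# experimental.directaccess.revnums enabled, b'15' is kept when revision 15
# exists but is filtered out of repo.changelog; b'1a2b3c' is kept only when
# the prefix resolves in the unfiltered changelog to a revision hidden in the
# filtered one:
#
#     _getrevsfromsymbols(repo, {b'15', b'1a2b3c'})
#     # -> {15, 42}   (42 being the made-up revision of the resolved hash)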

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
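
# Editor's illustration (not part of scmutil.py): the revset above matches
# what one would type on the command line; the bookmark name 'feature' is
# hypothetical:
#
#     hg log -r "ancestors(bookmark('feature'))
#                - ancestors(head() and not bookmark('feature'))
#                - ancestors(bookmark() and not bookmark('feature'))"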