statichttprepo: use new functions for requirements validation...
Gregory Szorc
r39730:61929805 default
@@ -1,1791 +1,1772 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import hashlib
import os
import re
import socket
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    short,
    wdirid,
    wdirrev,
)

from . import (
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    revsetlang,
    similar,
    url,
    util,
    vfs,
)

from .utils import (
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod(r'parsers')

termsize = scmplatform.termsize

class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                 r'unknown=%s, ignored=%s, clean=%s>') %
                tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))

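# Example (hypothetical sketch, not part of the original file): callers read
# a status object by named attribute rather than tuple index; "repo" is an
# assumed localrepo instance.
#
#   st = repo.status()
#   for f in st.modified:
#       repo.ui.write("M %s\n" % f)
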
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)

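# Example (hypothetical sketch): iterating subrepos touched between the
# working copy parent and the working copy; the contexts are assumed
# changectx/workingctx objects.
#
#   for subpath, sub in itersubrepos(repo['.'], repo[None]):
#       repo.ui.status("examining subrepo %s\n" % subpath)
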
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.error(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.error(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.error(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        return inst.code
    except socket.error as inst:
        ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    return -1

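# Example (hypothetical sketch): dispatch-style callers wrap their entry
# point so Mercurial errors are reported and turned into an exit code;
# "runcommand" is an assumed callable, not something defined here.
#
#   ret = callcatch(ui, lambda: runcommand(ui, repo, args))
#   # ret is func()'s return value, 1 for InterventionRequired, else -1
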
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(c))
    try:
        int(lbl)
        raise error.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)

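# Example (hypothetical sketch): commands creating bookmarks or branches vet
# the user-supplied name first; error.Abort propagates for reserved or
# malformed names such as 'tip', '123' or 'a:b'.
#
#   checknewlabel(repo, mark, 'bookmark')
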
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                          % pycompat.bytestr(f))

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

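# Example (hypothetical sketch): 'hg add'-style code audits each filename,
# warning (abort=False) on case-folding collisions.
#
#   audit = casecollisionauditor(ui, False, repo.dirstate)
#   audit('README')
#   audit('readme')  # warns: collides with 'README' after case-folding
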
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%d;' % rev)
        key = s.digest()
    return key

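# Example (hypothetical sketch): a cache validator combines the usual
# tiprev/tipnode check with the filtered-revision hash to notice repoview
# changes; "storedhash" is an assumed value read from the cache file.
#
#   if storedhash != filteredhash(repo, len(repo) - 1):
#       pass  # cache is stale for this view; rebuild it
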
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

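# Example (hypothetical sketch): enumerating repositories for an
# hgwebdir-style listing, following symlinks; the path is illustrative.
#
#   for repopath in walkrepos('/srv/hg', followsym=True):
#       print(repopath)
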
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node

def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev

def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))

def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return '%d:%s' % (rev, hexfunc(node))

def resolvehexnodeidprefix(repo, prefix):
    if (prefix.startswith('x') and
        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node

def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev
        if prefix[0:1] == b'0' or i >= len(repo):
            return False
        return True
    except ValueError:
        return False

def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()

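# Example (hypothetical sketch): when shortening many nodes in a row, share
# one cache dict so the disambiguation revset/nodetree is computed only once.
#
#   cache = {}
#   for ctx in repo.set('draft()'):
#       repo.ui.write(shortesthexnodeidprefix(repo, ctx.node(), 4, cache)
#                     + '\n')
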
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False

def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)

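# Example (hypothetical sketch): resolving a user-supplied label, probing
# first so an unknown name can be handled without catching the exception.
#
#   if isrevsymbol(repo, 'my-bookmark'):
#       ctx = revsymbol(repo, 'my-bookmark')
#   else:
#       ctx = repo['.']
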
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

def revsingle(repo, revspec, default='.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]

def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')

def revpair(repo, revs):
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]

def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

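# Example (hypothetical sketch): combining several user-supplied revsets into
# one smartset; the specs below are illustrative only.
#
#   revs = revrange(repo, ['heads(default)', 'draft()'])
#   if revs:
#       maxrev = revs.max()
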
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents

def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)

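# Example (hypothetical sketch): an exact matcher for a fixed file list; the
# filenames are illustrative.
#
#   m = matchfiles(repo, ['setup.py', 'README'])
#   tracked = [f for f in repo['.'] if m(f)]
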
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]

def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)

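# Example (hypothetical sketch): with "[ui] origbackuppath = .hg/origbackups"
# configured, the backup for a working-copy file lands under that directory
# instead of next to the original.
#
#   backup = origpath(ui, repo, repo.wjoin('foo/bar.txt'))
#   util.rename(repo.wjoin('foo/bar.txt'), backup)
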
845 class _containsnode(object):
845 class _containsnode(object):
846 """proxy __contains__(node) to container.__contains__ which accepts revs"""
846 """proxy __contains__(node) to container.__contains__ which accepts revs"""
847
847
848 def __init__(self, repo, revcontainer):
848 def __init__(self, repo, revcontainer):
849 self._torev = repo.changelog.rev
849 self._torev = repo.changelog.rev
850 self._revcontains = revcontainer.__contains__
850 self._revcontains = revcontainer.__contains__
851
851
852 def __contains__(self, node):
852 def __contains__(self, node):
853 return self._revcontains(self._torev(node))
853 return self._revcontains(self._torev(node))
854
854
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnode, newnodes in replacements.items():
            for newnode in newnodes:
                precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order; that might be useful
            # for some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)

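# Illustrative sketch, not part of this module: a history-rewriting command
# typically calls cleanupnodes once it knows the old -> new node mapping.
# `repo`, `oldnode` and `newnode` are assumed to exist.
def _example_cleanupnodes(repo, oldnode, newnode):
    # Bookmarks on oldnode move to newnode, an obsmarker is written (or the
    # old node is stripped when obsolescence is disabled), and phases are
    # fixed up because fixphase=True.
    cleanupnodes(repo, {oldnode: (newnode,)}, 'amend', fixphase=True)
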
def addremove(repo, matcher, prefix, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
                label = 'addremove.added'
            else:
                status = _('removing %s\n') % m.uipath(abs)
                label = 'addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

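# Illustrative sketch, not part of this module: `hg addremove --similarity 90`
# boils down to a call like this, where `repo` is an open repository.
def _example_addremove(repo):
    matcher = matchall(repo)  # match every file in the working copy
    # Unknown files are scheduled for addition, missing ones for removal,
    # and pairs at least 90% similar are recorded as renames.
    return addremove(repo, matcher, '', opts={'similarity': '90'})
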
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

-def readrequires(opener, supported):
-    '''Reads and parses .hg/requires and checks if all entries found
-    are in the list of supported features.'''
-    requirements = set(opener.read("requires").splitlines())
-    missings = []
-    for r in requirements:
-        if r not in supported:
-            if not r or not r[0:1].isalnum():
-                raise error.RequirementError(_(".hg/requires file is corrupt"))
-            missings.append(r)
-    missings.sort()
-    if missings:
-        raise error.RequirementError(
-            _("repository requires features unknown to this Mercurial: %s")
-            % " ".join(missings),
-            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
-                   " for more information"))
-    return requirements
-
def writerequires(opener, requirements):
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)

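# Illustrative sketch, not part of this module: writerequires persists a
# repository's feature set to .hg/requires, one sorted entry per line.
# `repo` is assumed to be an open repository whose vfs is rooted at .hg/.
def _example_writerequires(repo):
    writerequires(repo.vfs, {'revlogv1', 'store', 'fncache', 'generaldelta'})
    # .hg/requires now contains fncache, generaldelta, revlogv1 and store,
    # each on its own line, in sorted order.
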
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is returned.

    On external property set operations, stat() calls are performed and the new
    value is cached.

    On property delete operations, cached data is removed.

    When using the property API, cached data is always returned, if available:
    no stat() is performed to check if the file has changed and if the function
    needs to be called to reflect file changes.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose method was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.sname in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)

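# Illustrative sketch, not part of this module: the docstring above says users
# should subclass filecache and override join(). The subclass and the
# parsebookmarks() helper below are hypothetical; Mercurial's real subclasses
# live elsewhere.
class _examplerepofilecache(filecache):
    """filecache whose paths are resolved against an object's .hg/ vfs"""
    def join(self, obj, fname):
        # obj is the decorated instance; it is assumed to expose a vfs
        # rooted at .hg/, so this yields the path that will be stat()ed.
        return obj.vfs.join(fname)

# Usage sketch: the decorated method is re-run only when .hg/bookmarks'
# stat data changes; otherwise the cached result is returned.
#
#     @_examplerepofilecache('bookmarks')
#     def bookmarks(self):
#         return parsebookmarks(self.vfs.read('bookmarks'))
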
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data

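# Illustrative sketch, not part of this module: wiring up an extdata source.
# The [extdata] section name is real; the "bugzilla" key and the buginfo.py
# command are hypothetical. Given an hgrc entry such as
#
#     [extdata]
#     bugzilla = shell:python buginfo.py
#
# where the command prints lines like "a1b2c3d4 fixed in 4.7", the mapping
# can be fetched with:
def _example_extdatasource(repo):
    return extdatasource(repo, "bugzilla")  # -> {rev: 'fixed in 4.7', ...}
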
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

class progress(object):
    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item="", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.ui.progress(self.topic, None)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit,
                         self.total)

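# Illustrative sketch, not part of this module: progress is a context
# manager, so the bar is always completed, even if the loop raises.
# `repo` and `files` are assumed to exist.
def _example_progress(repo, files):
    with progress(repo.ui, 'checking', unit='files', total=len(files)) as p:
        for f in files:
            p.increment(item=f)  # advance pos by one and redraw the bar
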
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta')
            or ui.configbool('format', 'usegeneraldelta')
            or ui.configbool('format', 'sparse-revlog'))

def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')

class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))

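# Illustrative sketch, not part of this module: round-tripping a state file.
# `repo` is assumed to exist; 'mystate' is a hypothetical file name.
def _example_simplekeyvaluefile(repo):
    f = simplekeyvaluefile(repo.vfs, 'mystate')
    f.write({'version': '1', 'inprogress': 'yes'}, firstline='example-v1')
    # The file now holds 'example-v1' on the first line, followed by
    # version=1 and inprogress=yes lines (in no guaranteed order).
    return f.read(firstlinenonkeyval=True)
    # -> {'__firstline': 'example-v1', 'version': '1', 'inprogress': 'yes'}
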
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

_reportnewcssource = [
    'pull',
    'unbundle',
]

def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            if origrepolen >= len(repo):
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%d: and not obsolete()', origrepolen)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            draft = len(repo.revs('%ld and draft()', revs))
            secret = len(repo.revs('%ld and secret()', revs))
            if not (draft or secret):
                msg = _('new changesets %s\n') % revrange
            elif draft and secret:
                msg = _('new changesets %s (%d drafts, %d secrets)\n')
                msg %= (revrange, draft, secret)
            elif draft:
                msg = _('new changesets %s (%d drafts)\n')
                msg %= (revrange, draft)
            elif secret:
                msg = _('new changesets %s (%d secrets)\n')
                msg %= (revrange, secret)
            else:
                raise error.ProgrammingError('entered unreachable condition')
            repo.ui.status(msg)

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets that existed
            before the pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

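# Illustrative sketch, not part of this module: with five heads and the
# default maxnumnodes of 4, this returns four short (12-character) hashes
# followed by "and 1 others"; with --verbose, all hashes are printed.
def _example_nodesummaries(repo):
    return nodesummaries(repo, repo.heads())
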
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

1707 def unhidehashlikerevs(repo, specs, hiddentype):
1688 def unhidehashlikerevs(repo, specs, hiddentype):
1708 """parse the user specs and unhide changesets whose hash or revision number
1689 """parse the user specs and unhide changesets whose hash or revision number
1709 is passed.
1690 is passed.
1710
1691
1711 hiddentype can be: 1) 'warn': warn while unhiding changesets
1692 hiddentype can be: 1) 'warn': warn while unhiding changesets
1712 2) 'nowarn': don't warn while unhiding changesets
1693 2) 'nowarn': don't warn while unhiding changesets
1713
1694
1714 returns a repo object with the required changesets unhidden
1695 returns a repo object with the required changesets unhidden
1715 """
1696 """
1716 if not repo.filtername or not repo.ui.configbool('experimental',
1697 if not repo.filtername or not repo.ui.configbool('experimental',
1717 'directaccess'):
1698 'directaccess'):
1718 return repo
1699 return repo
1719
1700
1720 if repo.filtername not in ('visible', 'visible-hidden'):
1701 if repo.filtername not in ('visible', 'visible-hidden'):
1721 return repo
1702 return repo
1722
1703
1723 symbols = set()
1704 symbols = set()
1724 for spec in specs:
1705 for spec in specs:
1725 try:
1706 try:
1726 tree = revsetlang.parse(spec)
1707 tree = revsetlang.parse(spec)
1727 except error.ParseError: # will be reported by scmutil.revrange()
1708 except error.ParseError: # will be reported by scmutil.revrange()
1728 continue
1709 continue
1729
1710
1730 symbols.update(revsetlang.gethashlikesymbols(tree))
1711 symbols.update(revsetlang.gethashlikesymbols(tree))
1731
1712
1732 if not symbols:
1713 if not symbols:
1733 return repo
1714 return repo
1734
1715
1735 revs = _getrevsfromsymbols(repo, symbols)
1716 revs = _getrevsfromsymbols(repo, symbols)
1736
1717
1737 if not revs:
1718 if not revs:
1738 return repo
1719 return repo
1739
1720
1740 if hiddentype == 'warn':
1721 if hiddentype == 'warn':
1741 unfi = repo.unfiltered()
1722 unfi = repo.unfiltered()
1742 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1723 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1743 repo.ui.warn(_("warning: accessing hidden changesets for write "
1724 repo.ui.warn(_("warning: accessing hidden changesets for write "
1744 "operation: %s\n") % revstr)
1725 "operation: %s\n") % revstr)
1745
1726
1746 # we have to use a new filtername to separate branch/tags caches until we can
1727 # we have to use a new filtername to separate branch/tags caches until we can
1747 # disable these caches when revisions are dynamically pinned.
1728 # disable these caches when revisions are dynamically pinned.
1748 return repo.filtered('visible-hidden', revs)
1729 return repo.filtered('visible-hidden', revs)
1749
1730
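A usage sketch from a hypothetical command whose hash-like arguments should resolve even when hidden (assumes experimental.directaccess is enabled in the user's configuration):

# specs come straight from the command line; 'warn' reports each
# unhidden changeset, 'nowarn' stays silent.
repo = unhidehashlikerevs(repo, specs, 'warn')
revs = revrange(repo, specs)    # hash-like hidden revisions now resolve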
1750 def _getrevsfromsymbols(repo, symbols):
1731 def _getrevsfromsymbols(repo, symbols):
1751 """parse the list of symbols and returns a set of revision numbers of hidden
1732 """parse the list of symbols and returns a set of revision numbers of hidden
1752 changesets present in symbols"""
1733 changesets present in symbols"""
1753 revs = set()
1734 revs = set()
1754 unfi = repo.unfiltered()
1735 unfi = repo.unfiltered()
1755 unficl = unfi.changelog
1736 unficl = unfi.changelog
1756 cl = repo.changelog
1737 cl = repo.changelog
1757 tiprev = len(unficl)
1738 tiprev = len(unficl)
1758 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1739 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1759 for s in symbols:
1740 for s in symbols:
1760 try:
1741 try:
1761 n = int(s)
1742 n = int(s)
1762 if n <= tiprev:
1743 if n <= tiprev:
1763 if not allowrevnums:
1744 if not allowrevnums:
1764 continue
1745 continue
1765 else:
1746 else:
1766 if n not in cl:
1747 if n not in cl:
1767 revs.add(n)
1748 revs.add(n)
1768 continue
1749 continue
1769 except ValueError:
1750 except ValueError:
1770 pass
1751 pass
1771
1752
1772 try:
1753 try:
1773 s = resolvehexnodeidprefix(unfi, s)
1754 s = resolvehexnodeidprefix(unfi, s)
1774 except (error.LookupError, error.WdirUnsupported):
1755 except (error.LookupError, error.WdirUnsupported):
1775 s = None
1756 s = None
1776
1757
1777 if s is not None:
1758 if s is not None:
1778 rev = unficl.rev(s)
1759 rev = unficl.rev(s)
1779 if rev not in cl:
1760 if rev not in cl:
1780 revs.add(rev)
1761 revs.add(rev)
1781
1762
1782 return revs
1763 return revs
1783
1764
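To make the branching above concrete, a sketch under an assumed repository state (revision 42 exists but is hidden): plain revision numbers are honoured only when directaccess.revnums is set, while hash prefixes always go through the unfiltered changelog.

repo.ui.setconfig('experimental', 'directaccess.revnums', False)
_getrevsfromsymbols(repo, {'42'})       # -> set()
repo.ui.setconfig('experimental', 'directaccess.revnums', True)
_getrevsfromsymbols(repo, {'42'})       # -> {42}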
1784 def bookmarkrevs(repo, mark):
1765 def bookmarkrevs(repo, mark):
1785 """
1766 """
1786 Select revisions reachable by a given bookmark
1767 Select revisions reachable by a given bookmark
1787 """
1768 """
1788 return repo.revs("ancestors(bookmark(%s)) - "
1769 return repo.revs("ancestors(bookmark(%s)) - "
1789 "ancestors(head() and not bookmark(%s)) - "
1770 "ancestors(head() and not bookmark(%s)) - "
1790 "ancestors(bookmark() and not bookmark(%s))",
1771 "ancestors(bookmark() and not bookmark(%s))",
1791 mark, mark, mark)
1772 mark, mark, mark)
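In revset terms this selects the changesets "owned" by the bookmark: its ancestors minus anything also reachable from another head or another bookmark. A usage sketch (bookmark name hypothetical):

revs = bookmarkrevs(repo, 'feature')    # smartset of revision numbers
for rev in revs:
    repo.ui.write('%d:%s\n' % (rev, short(repo[rev].node())))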
@@ -1,221 +1,224 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 changelog,
16 changelog,
17 error,
17 error,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 namespaces,
20 namespaces,
21 pathutil,
21 pathutil,
22 scmutil,
23 store,
22 store,
24 url,
23 url,
25 util,
24 util,
26 vfs as vfsmod,
25 vfs as vfsmod,
27 )
26 )
28
27
29 urlerr = util.urlerr
28 urlerr = util.urlerr
30 urlreq = util.urlreq
29 urlreq = util.urlreq
31
30
32 class httprangereader(object):
31 class httprangereader(object):
33 def __init__(self, url, opener):
32 def __init__(self, url, opener):
34 # we assume opener has HTTPRangeHandler
33 # we assume opener has HTTPRangeHandler
35 self.url = url
34 self.url = url
36 self.pos = 0
35 self.pos = 0
37 self.opener = opener
36 self.opener = opener
38 self.name = url
37 self.name = url
39
38
40 def __enter__(self):
39 def __enter__(self):
41 return self
40 return self
42
41
43 def __exit__(self, exc_type, exc_value, traceback):
42 def __exit__(self, exc_type, exc_value, traceback):
44 self.close()
43 self.close()
45
44
46 def seek(self, pos):
45 def seek(self, pos):
47 self.pos = pos
46 self.pos = pos
48 def read(self, bytes=None):
47 def read(self, bytes=None):
49 req = urlreq.request(self.url)
48 req = urlreq.request(self.url)
50 end = ''
49 end = ''
51 if bytes:
50 if bytes:
52 end = self.pos + bytes - 1
51 end = self.pos + bytes - 1
53 if self.pos or end:
52 if self.pos or end:
54 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
53 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
55
54
56 try:
55 try:
57 f = self.opener.open(req)
56 f = self.opener.open(req)
58 data = f.read()
57 data = f.read()
59 code = f.code
58 code = f.code
60 except urlerr.httperror as inst:
59 except urlerr.httperror as inst:
61 num = inst.code == 404 and errno.ENOENT or None
60 num = inst.code == 404 and errno.ENOENT or None
62 raise IOError(num, inst)
61 raise IOError(num, inst)
63 except urlerr.urlerror as inst:
62 except urlerr.urlerror as inst:
64 raise IOError(None, inst.reason[1])
63 raise IOError(None, inst.reason[1])
65
64
66 if code == 200:
65 if code == 200:
67 # HTTPRangeHandler does nothing if remote does not support
66 # HTTPRangeHandler does nothing if remote does not support
68 # Range headers and returns the full entity. Let's slice it.
67 # Range headers and returns the full entity. Let's slice it.
69 if bytes:
68 if bytes:
70 data = data[self.pos:self.pos + bytes]
69 data = data[self.pos:self.pos + bytes]
71 else:
70 else:
72 data = data[self.pos:]
71 data = data[self.pos:]
73 elif bytes:
72 elif bytes:
74 data = data[:bytes]
73 data = data[:bytes]
75 self.pos += len(data)
74 self.pos += len(data)
76 return data
75 return data
77 def readlines(self):
76 def readlines(self):
78 return self.read().splitlines(True)
77 return self.read().splitlines(True)
79 def __iter__(self):
78 def __iter__(self):
80 return iter(self.readlines())
79 return iter(self.readlines())
81 def close(self):
80 def close(self):
82 pass
81 pass
83
82
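The Range arithmetic in read() is inclusive at both ends, so a 100-byte read at offset 200 asks for bytes 200 through 299. A sketch, assuming an opener produced by build_opener() below and a placeholder URL:

r = httprangereader('http://example.com/repo/.hg/00changelog.i', opener)
r.seek(200)
chunk = r.read(100)   # sends "Range: bytes=200-299" (end = pos + bytes - 1)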
84 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
83 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
85 # which was itself extracted from urlgrabber. See the last version of
84 # which was itself extracted from urlgrabber. See the last version of
86 # byterange.py from history if you need more information.
85 # byterange.py from history if you need more information.
87 class _RangeError(IOError):
86 class _RangeError(IOError):
88 """Error raised when an unsatisfiable range is requested."""
87 """Error raised when an unsatisfiable range is requested."""
89
88
90 class _HTTPRangeHandler(urlreq.basehandler):
89 class _HTTPRangeHandler(urlreq.basehandler):
91 """Handler that enables HTTP Range headers.
90 """Handler that enables HTTP Range headers.
92
91
93 This was extremely simple. The Range header is an HTTP feature to
92 This was extremely simple. The Range header is an HTTP feature to
94 begin with, so all this class does is tell urllib2 that the
93 begin with, so all this class does is tell urllib2 that the
95 "206 Partial Content" response from the HTTP server is what we
94 "206 Partial Content" response from the HTTP server is what we
96 expected.
95 expected.
97 """
96 """
98
97
99 def http_error_206(self, req, fp, code, msg, hdrs):
98 def http_error_206(self, req, fp, code, msg, hdrs):
100 # 206 Partial Content Response
99 # 206 Partial Content Response
101 r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
100 r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
102 r.code = code
101 r.code = code
103 r.msg = msg
102 r.msg = msg
104 return r
103 return r
105
104
106 def http_error_416(self, req, fp, code, msg, hdrs):
105 def http_error_416(self, req, fp, code, msg, hdrs):
107 # HTTP's Range Not Satisfiable error
106 # HTTP's Range Not Satisfiable error
108 raise _RangeError('Requested Range Not Satisfiable')
107 raise _RangeError('Requested Range Not Satisfiable')
109
108
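The same trick expressed against the modern standard library, for readers outside the Mercurial tree; a self-contained sketch with a placeholder URL:

import urllib.request
from urllib.response import addinfourl

class RangeHandler(urllib.request.BaseHandler):
    def http_error_206(self, req, fp, code, msg, hdrs):
        # Accept "206 Partial Content" as success rather than an error.
        r = addinfourl(fp, hdrs, req.get_full_url(), code=code)
        r.msg = msg
        return r

opener = urllib.request.build_opener(RangeHandler())
req = urllib.request.Request('https://example.com/data.bin',
                             headers={'Range': 'bytes=0-99'})
data = opener.open(req).read()   # at most the first 100 bytes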
110 def build_opener(ui, authinfo):
109 def build_opener(ui, authinfo):
111 # urllib cannot handle URLs with embedded user or passwd
110 # urllib cannot handle URLs with embedded user or passwd
112 urlopener = url.opener(ui, authinfo)
111 urlopener = url.opener(ui, authinfo)
113 urlopener.add_handler(_HTTPRangeHandler())
112 urlopener.add_handler(_HTTPRangeHandler())
114
113
115 class statichttpvfs(vfsmod.abstractvfs):
114 class statichttpvfs(vfsmod.abstractvfs):
116 def __init__(self, base):
115 def __init__(self, base):
117 self.base = base
116 self.base = base
118
117
119 def __call__(self, path, mode='r', *args, **kw):
118 def __call__(self, path, mode='r', *args, **kw):
120 if mode not in ('r', 'rb'):
119 if mode not in ('r', 'rb'):
121 raise IOError('Permission denied')
120 raise IOError('Permission denied')
122 f = "/".join((self.base, urlreq.quote(path)))
121 f = "/".join((self.base, urlreq.quote(path)))
123 return httprangereader(f, urlopener)
122 return httprangereader(f, urlopener)
124
123
125 def join(self, path):
124 def join(self, path):
126 if path:
125 if path:
127 return pathutil.join(self.base, path)
126 return pathutil.join(self.base, path)
128 else:
127 else:
129 return self.base
128 return self.base
130
129
131 return statichttpvfs
130 return statichttpvfs
132
131
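Note that build_opener() returns the statichttpvfs class itself, not an instance: the class closes over urlopener, so every vfs created from it shares the one configured opener. Usage mirrors the repository constructor below (placeholder URL):

vfsclass = build_opener(ui, authinfo)           # a class, urlopener baked in
vfs = vfsclass('http://example.com/repo/.hg')
fp = vfs('store/00changelog.i')                 # -> httprangereader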
133 class statichttppeer(localrepo.localpeer):
132 class statichttppeer(localrepo.localpeer):
134 def local(self):
133 def local(self):
135 return None
134 return None
136 def canpush(self):
135 def canpush(self):
137 return False
136 return False
138
137
139 class statichttprepository(localrepo.localrepository):
138 class statichttprepository(localrepo.localrepository):
140 supported = localrepo.localrepository._basesupported
139 supported = localrepo.localrepository._basesupported
141
140
142 def __init__(self, ui, path):
141 def __init__(self, ui, path):
143 self._url = path
142 self._url = path
144 self.ui = ui
143 self.ui = ui
145
144
146 self.root = path
145 self.root = path
147 u = util.url(path.rstrip('/') + "/.hg")
146 u = util.url(path.rstrip('/') + "/.hg")
148 self.path, authinfo = u.authinfo()
147 self.path, authinfo = u.authinfo()
149
148
150 vfsclass = build_opener(ui, authinfo)
149 vfsclass = build_opener(ui, authinfo)
151 self.vfs = vfsclass(self.path)
150 self.vfs = vfsclass(self.path)
152 self.cachevfs = vfsclass(self.vfs.join('cache'))
151 self.cachevfs = vfsclass(self.vfs.join('cache'))
153 self._phasedefaults = []
152 self._phasedefaults = []
154
153
155 self.names = namespaces.namespaces()
154 self.names = namespaces.namespaces()
156 self.filtername = None
155 self.filtername = None
157
156
158 try:
157 try:
159 requirements = scmutil.readrequires(self.vfs, self.supported)
158 requirements = set(self.vfs.read(b'requires').splitlines())
160 except IOError as inst:
159 except IOError as inst:
161 if inst.errno != errno.ENOENT:
160 if inst.errno != errno.ENOENT:
162 raise
161 raise
163 requirements = set()
162 requirements = set()
164
163
165 # check if it is a non-empty old-style repository
164 # check if it is a non-empty old-style repository
166 try:
165 try:
167 fp = self.vfs("00changelog.i")
166 fp = self.vfs("00changelog.i")
168 fp.read(1)
167 fp.read(1)
169 fp.close()
168 fp.close()
170 except IOError as inst:
169 except IOError as inst:
171 if inst.errno != errno.ENOENT:
170 if inst.errno != errno.ENOENT:
172 raise
171 raise
173 # we do not care about empty old-style repositories here
172 # we do not care about empty old-style repositories here
174 msg = _("'%s' does not appear to be an hg repository") % path
173 msg = _("'%s' does not appear to be an hg repository") % path
175 raise error.RepoError(msg)
174 raise error.RepoError(msg)
176
175
176 supportedrequirements = localrepo.gathersupportedrequirements(ui)
177 localrepo.ensurerequirementsrecognized(requirements,
178 supportedrequirements)
179
177 # setup store
180 # setup store
178 self.store = store.store(requirements, self.path, vfsclass)
181 self.store = store.store(requirements, self.path, vfsclass)
179 self.spath = self.store.path
182 self.spath = self.store.path
180 self.svfs = self.store.opener
183 self.svfs = self.store.opener
181 self.sjoin = self.store.join
184 self.sjoin = self.store.join
182 self._filecache = {}
185 self._filecache = {}
183 self.requirements = requirements
186 self.requirements = requirements
184
187
185 self.manifestlog = manifest.manifestlog(self.svfs, self)
188 self.manifestlog = manifest.manifestlog(self.svfs, self)
186 self.changelog = changelog.changelog(self.svfs)
189 self.changelog = changelog.changelog(self.svfs)
187 self._tags = None
190 self._tags = None
188 self.nodetagscache = None
191 self.nodetagscache = None
189 self._branchcaches = {}
192 self._branchcaches = {}
190 self._revbranchcache = None
193 self._revbranchcache = None
191 self.encodepats = None
194 self.encodepats = None
192 self.decodepats = None
195 self.decodepats = None
193 self._transref = None
196 self._transref = None
194
197
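This hunk is the change named in the commit message: rather than scmutil.readrequires() reading and validating in one step, the requires file is now read directly and validated with the new localrepo helpers. Roughly, gathersupportedrequirements() collects what this Mercurial plus its loaded extensions understand, and ensurerequirementsrecognized() aborts on anything else; a simplified sketch of that behavior, not the real implementation:

def ensurerequirementsrecognized(requirements, supported):
    # Simplified: the real helper in localrepo.py also emits a hint.
    missing = requirements - supported
    if missing:
        raise error.RepoError(
            _('repository requires features unknown to this '
              'Mercurial: %s') % ' '.join(sorted(missing)))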
195 def _restrictcapabilities(self, caps):
198 def _restrictcapabilities(self, caps):
196 caps = super(statichttprepository, self)._restrictcapabilities(caps)
199 caps = super(statichttprepository, self)._restrictcapabilities(caps)
197 return caps.difference(["pushkey"])
200 return caps.difference(["pushkey"])
198
201
199 def url(self):
202 def url(self):
200 return self._url
203 return self._url
201
204
202 def local(self):
205 def local(self):
203 return False
206 return False
204
207
205 def peer(self):
208 def peer(self):
206 return statichttppeer(self)
209 return statichttppeer(self)
207
210
208 def wlock(self, wait=True):
211 def wlock(self, wait=True):
209 raise error.LockUnavailable(0, _('lock not available'), 'lock',
212 raise error.LockUnavailable(0, _('lock not available'), 'lock',
210 _('cannot lock static-http repository'))
213 _('cannot lock static-http repository'))
211
214
212 def lock(self, wait=True):
215 def lock(self, wait=True):
213 raise error.Abort(_('cannot lock static-http repository'))
216 raise error.Abort(_('cannot lock static-http repository'))
214
217
215 def _writecaches(self):
218 def _writecaches(self):
216 pass # statichttprepository is read-only
219 pass # statichttprepository is read-only
217
220
218 def instance(ui, path, create, intents=None, createopts=None):
221 def instance(ui, path, create, intents=None, createopts=None):
219 if create:
222 if create:
220 raise error.Abort(_('cannot create new static-http repository'))
223 raise error.Abort(_('cannot create new static-http repository'))
221 return statichttprepository(ui, path[7:])
224 return statichttprepository(ui, path[7:])
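End to end, the class is reached through the static-http:// URL scheme; instance() strips the 'static-' prefix (path[7:]) before handing the bare http URL to the constructor. A sketch with a placeholder URL:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), 'static-http://example.com/repo')
repo.peer().canpush()    # -> False; pushing to static http is impossible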